Classifying handwritten digits#

This notebook builds a single-layer, 10-class classifier: an input layer of 784 pixel values (one per pixel of a 28 × 28 image) feeding 10 output neurons with the softmax activation function.
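
Written out (a short formula sketch, not part of the original notebook), for a flattened image $x \in \mathbb{R}^{1 \times 784}$ the network computes

$$\hat{y} = \operatorname{softmax}(xW + b), \qquad \operatorname{softmax}(z)_j = \frac{e^{z_j}}{\sum_{k=1}^{10} e^{z_k}},$$

with weights $W \in \mathbb{R}^{784 \times 10}$ and bias $b \in \mathbb{R}^{10}$; the $j$-th output is interpreted as the probability that the image shows digit $j$.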


Importing libraries and packages#

# System
import os

# Silence TensorFlow's C++ start-up messages; the variable is set
# before TensorFlow is imported so that it takes effect
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Mathematical operations and data manipulation
from pandas import get_dummies

# Modelling
import tensorflow as tf

# Statistics
from sklearn.metrics import accuracy_score

Loading the dataset#

The MNIST dataset ships with TensorFlow through tf.keras.datasets. It consists of 70,000 grayscale images (28 × 28 pixels each) of handwritten digits from 0 to 9, split into 60,000 training and 10,000 test images.

# Creating an instance of the MNIST dataset
dataset = tf.keras.datasets.mnist
# Loading the MNIST dataset's train and test data
(train_features, train_labels), (
    test_features,
    test_labels,
) = dataset.load_data()
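
As a quick sanity check (a minimal sketch, not part of the original notebook), the shapes of the loaded arrays can be inspected; the values in the comments are the standard MNIST split:

# Inspecting the shapes of the loaded arrays
print(train_features.shape, train_labels.shape)  # (60000, 28, 28) (60000,)
print(test_features.shape, test_labels.shape)    # (10000, 28, 28) (10000,)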

Training the network#

# Normalizing the data
train_features, test_features = train_features / 255.0, test_features / 255.0
# Flattening the 2-dimensional images into row vectors
# (a 28 × 28 pixel image is flattened to 784 values)
x = tf.reshape(train_features, [60000, 784])
# Creating a Variable with the features and typecasting it to float32
x = tf.Variable(x)
x = tf.cast(x, tf.float32)
# One-hot encoding the labels and turning them into a matrix
y_hot = get_dummies(train_labels)
y = y_hot.values
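
As an illustration of the one-hot encoding (a minimal sketch on a hypothetical toy list of labels, not data from the notebook), get_dummies gives each label a row with a 1 in the column of that digit and 0 elsewhere:

# One-hot encoding a toy list of labels; each row has a single 1
# in the column corresponding to the label
toy = get_dummies([2, 0, 1])
print(toy.values.astype(int))
# [[0 0 1]
#  [1 0 0]
#  [0 1 0]]
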
# Setting up a single-layer network with 10 neurons;
# it is trained for 1000 iterations in the next cell
Number_of_features = 784
Number_of_units = 10

# Weights and bias, initialized to zeros
weight = tf.Variable(tf.zeros([Number_of_features, Number_of_units]))
bias = tf.Variable(tf.zeros([Number_of_units]))

# Optimizer: Adam with a learning rate of 0.01
optimizer = tf.optimizers.Adam(0.01)
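
The layer therefore has $784 \times 10 + 10 = 7850$ trainable parameters: the weight matrix plus the bias vector.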
def perceptron(x):
    # Forward pass: affine transformation followed by softmax
    z = tf.add(tf.matmul(x, weight), bias)
    output = tf.nn.softmax(z)
    return output


def train(i):
    for n in range(i):
        # Mean cross-entropy loss; note that perceptron() already applies
        # softmax, so its probabilities are what is passed as `logits` here
        loss = lambda: tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
                labels=y, logits=perceptron(x)
            )
        )
        optimizer.minimize(loss, [weight, bias])


# Train the network for 1000 iterations
train(1000)
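
To watch the optimization converge, the loss can be evaluated periodically during training. The following variant of train() is a minimal sketch (the train_verbose name and the reporting interval are assumptions, not part of the original notebook):

# A hypothetical variant of train() that also reports the current loss
def train_verbose(iterations, report_every=100):
    for n in range(iterations):
        loss = lambda: tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
                labels=y, logits=perceptron(x)
            )
        )
        optimizer.minimize(loss, [weight, bias])
        if n % report_every == 0:
            # Print the loss value after this update step
            tf.print("iteration", n, "loss", loss())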

Statistics#

tf.print(bias)
[-0.499320358 0.642781138 0.152440891 ... 1.30194306 -2.50900364 -1.67989814]
# Preparing the test data to measure accuracy
test = tf.reshape(test_features, [10000, 784])

test = tf.Variable(test)
test = tf.cast(test, tf.float32)

test_hot = get_dummies(test_labels)
test_matrix = test_hot.values
# Running the predictions by passing the test data through the network
ypred = perceptron(test)
# Rounding the probabilities to a 0/1 indicator row per image
ypred = tf.round(ypred)
# Measuring the prediction accuracy
acc = accuracy_score(test_hot, ypred)
print(acc)
0.9304
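
The network classifies about 93% of the test images correctly. Because tf.round marks a class only when its probability exceeds 0.5, images where no class reaches that threshold are counted as wrong. A common alternative (a minimal sketch, not part of the original notebook) is to take the most probable class with argmax and compare it directly with the integer labels:

# Alternative accuracy: pick the most probable digit for each test image
pred_classes = tf.argmax(perceptron(test), axis=1)
alt_acc = accuracy_score(test_labels, pred_classes.numpy())
print(alt_acc)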