Forward pass of a Simple RNN

The softmax function, also known as the normalized exponential function, normalizes a vector of raw scores into a probability distribution whose entries sum to 1.
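For reference, the standard definition (here $z$ is a vector of raw scores and $i$ indexes its entries):

$$\operatorname{softmax}(z)_i = \frac{e^{z_i}}{\sum_{j} e^{z_j}}$$

Each output lies in $(0, 1)$, and the outputs sum to 1.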


Importing libraries and packages

# System
import os

# Silence TensorFlow's C++ startup logs; this must be set before importing tensorflow
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

# Mathematical operations and data manipulation
import numpy as np

# Modelling
import tensorflow as tf

# Widen the notebook container for easier reading
from IPython.display import display, HTML

display(HTML("<style>.container {width:80% !important;}</style>"))

Forward pass

# Seed NumPy and TensorFlow so the random weights are reproducible
np.random.seed(0)
tf.random.set_seed(0)
# Number of inputs (2) and number of neurons in the hidden layer (3)
num_inputs = 2
num_neurons = 3
# Feedforward (input-to-hidden) and recurrent (hidden-to-hidden) weight matrices
wf = tf.Variable(tf.random.normal(shape=[num_inputs, num_neurons]))
wr = tf.Variable(tf.random.normal(shape=[num_neurons, num_neurons]))
# Bias variable (as many values as the number of neurons in the hidden layer)
bias = tf.Variable(tf.zeros([1, num_neurons]))
# Input mini-batches at time steps t0 and t1 (3 samples, 2 features each)
xt0_batch = np.array([[0, 1], [2, 3], [4, 5]]).astype(np.float32)
xt1_batch = np.array([[100, 101], [102, 103], [104, 105]]).astype(np.float32)
def forward_pass(xt0, xt1):
    # Hidden state at t0: no previous state, so only the feedforward term
    yt0 = tf.tanh(tf.matmul(xt0, wf) + bias)
    # Hidden state at t1: recurrent term from yt0 plus the feedforward term
    yt1 = tf.tanh(tf.matmul(yt0, wr) + tf.matmul(xt1, wf) + bias)
    return yt0, yt1


# Forward pass over the two time steps
yt0_output, yt1_output = forward_pass(xt0_batch, xt1_batch)
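Written out, the two lines of forward_pass compute the simple-RNN recurrence unrolled for two time steps (with the initial state taken as zero, so the recurrent term vanishes at $t_0$):

$$y_{t_0} = \tanh(x_{t_0} W_f + b), \qquad y_{t_1} = \tanh(y_{t_0} W_r + x_{t_1} W_f + b)$$

where $W_f$ is the feedforward weight matrix, $W_r$ the recurrent weight matrix, and $b$ the bias.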
# Printing the output values yt0 and yt1
tf.print(yt0_output)
tf.print(yt1_output)
[[-0.776318431 -0.844548464 0.438419849]
 [-0.0857750699 -0.993522227 0.516408086]
 [0.6983459 -0.999749422 0.586677969]]
[[1 -1 0.99999851]
 [1 -1 0.999998331]
 [1 -1 0.999997377]]
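Note that yt1_output is saturated at (or extremely close to) ±1: the time-step-1 inputs are on the order of 100, so the pre-activations passed to tanh sit deep in its saturated regions.

As a sanity check, the same two-step forward pass can be expressed with Keras' built-in SimpleRNN layer. A minimal sketch, assuming a TensorFlow 2.x eager environment and relying on SimpleRNN storing its parameters in the order [kernel, recurrent_kernel, bias]:

# Cross-check the manual forward pass against tf.keras.layers.SimpleRNN
rnn = tf.keras.layers.SimpleRNN(num_neurons, activation="tanh", return_sequences=True)
rnn.build(input_shape=(None, 2, num_inputs))
# Copy our weights in: kernel = wf, recurrent_kernel = wr, bias flattened to shape (3,)
rnn.set_weights([wf.numpy(), wr.numpy(), bias.numpy().reshape(-1)])

# Stack the two batches along a time axis: shape (batch=3, time=2, features=2)
sequence = np.stack([xt0_batch, xt1_batch], axis=1)
keras_outputs = rnn(sequence)  # shape (3, 2, 3)
tf.print(keras_outputs[:, 0])  # should match yt0_output
tf.print(keras_outputs[:, 1])  # should match yt1_output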