Single-neuron neural network in Python

NumPy | Python Methods and Functions

One neuron converts this input to some output. Depending on the given input and the weights assigned to each input, the neuron decides whether or not it fires. Suppose a neuron has 3 input connections and one output.

We will use tanh as the activation function.

# Python program to implement
# neural network of one neuron

# import all required libraries

from numpy import exp, array, random, dot, tanh

# Class for creating neural
# network with one neuron

class NeuralNetwork():
    """A minimal neural network consisting of a single neuron with
    3 input connections, a tanh activation, and a 3x1 weight matrix."""

    def __init__(self):
        # Seed the RNG so the same initial weights are generated on
        # every run (makes the demo reproducible).
        random.seed(1)

        # 3x1 weight matrix, initialised with values in [-1, 1).
        self.weight_matrix = 2 * random.random((3, 1)) - 1

    def tanh(self, x):
        """tanh activation function."""
        return tanh(x)

    def tanh_derivative(self, x):
        """Derivative of tanh: 1 - tanh(x)**2.

        NOTE(review): train() passes the already-activated output here,
        so tanh is applied twice; kept as-is to preserve the original
        tutorial's behavior and published results.
        """
        return 1.0 - tanh(x) ** 2

    def forward_propagation(self, inputs):
        """Forward pass: weighted sum of inputs squashed through tanh."""
        return self.tanh(dot(inputs, self.weight_matrix))

    def train(self, train_inputs, train_outputs, num_train_iterations):
        """Train the neuron for num_train_iterations gradient-style
        weight updates on the given input/output set."""
        for iteration in range(num_train_iterations):
            output = self.forward_propagation(train_inputs)

            # Error between the target and the predicted output.
            error = train_outputs - output

            # Scale the error by the gradient of tanh at the output,
            # then project it back onto the inputs to get the update.
            adjustment = dot(train_inputs.T,
                             error * self.tanh_derivative(output))

            self.weight_matrix += adjustment

Driver code

# Driver code: train the single-neuron network on four examples and
# then query it with a pattern it has not seen during training.
if __name__ == "__main__":

    neural_network = NeuralNetwork()

    print('Random weights at the start of training')
    print(neural_network.weight_matrix)

    # Four training examples, 3 inputs each, with their expected outputs.
    train_inputs = array([[0, 0, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]])
    train_outputs = array([[0, 1, 1, 0]]).T

    neural_network.train(train_inputs, train_outputs, 10000)

    print('New weights after training')
    print(neural_network.weight_matrix)

    # Testing the neural network in a new situation.
    print("Testing network on new examples ->")
    print(neural_network.forward_propagation(array([1, 0, 0])))

Output:

Random weights at the start of training [[-0.16595599] [0.44064899] [-0.99977125]] New weights after training [[5.39428067] [0.19482422] [0.34317086]] Testing network on new examples -> [0.99995873]