I have encountered this problem in a Tic-Tac-Toe neural network in Python (just a simple AI in the making). It occurs after I return the last value (the output) from feedForward. I am kind of a beginner, by the way.
<code>    def feedForward(self, input, layer):
        # base case: the last activation is returned
        if layer == len(self.shape):
            print(input)  # here the last activation is actually printed and works
            return input  # here the last activation is returned, but arrives as NoneType?
        # the output vector is initialized with the layer's shape
        self.output = np.zeros(self.shape[layer])
        print(input)
        # the neurons in this layer are iterated over and computed
        for i in range(self.shape[layer]):
            self.neurons[layer][i].calculateActivations(input)
            self.output[i] = self.neurons[layer][i].getVar("activation")
            print("neuron fired")
        self.feedForward(self.output, layer + 1)
</code>
In this method I print what ultimately comes out of my feedForward, but it is just NoneType, as you will see in the console output below.
<code>    def runFF(self, inputs):
        print("output --->" + "\n")
        out = self.feedForward(inputs, 0)
        print(out)
</code>
This is the output:
<code>[1 0 0 0 1 0 1 0 0 0 1 0 0 1 0 0 0 1 1 0 0 0 0 1 1 0 0]
# inputs for the fields, e.g. 001 --- empty? -> 0, is O? -> 0, is X? -> 1 = 001
neuron fired
neuron fired
neuron fired
neuron fired
neuron fired
[0.97608829 0.98383903 0.93957521 0.93431431 0.98965643]  # these are the activations for layer 1
neuron fired
neuron fired
neuron fired
[0.98738308 0.96916289 0.94868291]  # layer 2, and so on...
neuron fired
neuron fired
neuron fired
neuron fired
neuron fired
[0.7176812  0.885665   0.84968832 0.88980995 0.90426031]
neuron fired
neuron fired
neuron fired
neuron fired
neuron fired
neuron fired
neuron fired
neuron fired
neuron fired
[0.95916376 0.95721362 0.91595512 0.93022966 0.94000431 0.86405699
 0.93726489 0.89166409 0.89323725]  # this is the last activation, from the print(input) in feedForward()
None  # this is what happens when I print the returned last activation
</code>
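To clarify the input format: each of the nine board fields is one-hot encoded into three flags (empty / O / X), which is where the 27 inputs come from. A rough sketch of such an encoding (a simplified illustration, not my exact code; the helper name and board representation are made up):

<code>import numpy as np

def encode_board(board):
    # board: nine cells, each "", "O" or "X"
    flags = []
    for cell in board:
        flags.extend([int(cell == ""), int(cell == "O"), int(cell == "X")])
    return np.array(flags)  # 9 fields x 3 flags = 27 inputs

print(encode_board(["X", "", "", "X", "", "X", "", "", ""]))
</code>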
I have used two classes for this NN. This is the Neuron class:
<code>import numpy as np

class Neuron(object):
    def __init__(self):
        self.weights = []
        self.bias = 0.0
        self.activation = 0.0

    def set_weights(self, amount):
        self.weights = np.array([np.random.random_sample() for i in range(amount)])

    def set_bias(self):
        self.bias = np.random.random_sample()

    def getVar(self, var):
        if var == "weights":
            return self.weights
        elif var == "bias":
            return self.bias
        elif var == "activation":
            return self.activation
        elif var == "all":
            a = ""
            for i in range(len(self.weights)):
                a = a + " " + str(self.weights[i])
            return a + "/" + str(self.bias) + " " + str(self.activation)

    def sigmoid(self, z):
        return 1 / (1 + np.exp(-z))

    def calculateActivations(self, inputs):
        # weighted sum of the inputs plus bias, squashed through the sigmoid
        z = np.dot(inputs, self.weights) + self.bias
        self.activation = self.sigmoid(z)
</code>
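In isolation, a Neuron can be exercised like this (a small usage sketch; the input values are made up, and since the weights are random the printed activation will vary):

<code>import numpy as np

n = Neuron()
n.set_weights(3)  # three random weights for a three-input neuron
n.set_bias()
n.calculateActivations(np.array([1.0, 0.0, 1.0]))
print(n.getVar("activation"))  # a sigmoid output, so always strictly between 0 and 1
</code>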
Here is the actual MLP (no backpropagation yet, just feedForward):
<code>import numpy as np

class MLP(object):
    def __init__(self, Board):
        self.Board = Board
        self.shape = [5, 3, 5, 9]       # number of neurons per layer
        self.layout = [27, 5, 3, 5, 9]  # number of inputs per layer
        self.neurons = []

    def feedForward(self, input, layer):
        # base case of the recursion
        if layer == len(self.shape):
            print(input)
            return input
        # the output vector is initialized with the layer's shape
        self.output = np.zeros(self.shape[layer])
        print(input)
        # the neurons in this layer are iterated over and computed
        for i in range(self.shape[layer]):
            self.neurons[layer][i].calculateActivations(input)
            self.output[i] = self.neurons[layer][i].getVar("activation")
            print("neuron fired")
        self.feedForward(self.output, layer + 1)

    def createNeurons(self):
        for i in range(len(self.shape)):
            a = np.empty(self.shape[i], Neuron)
            for j in range(self.shape[i]):
                a[j] = Neuron()
            self.neurons.append(a)
        print(self.neurons)

    def initializeNeurons(self):
        for i in range(len(self.shape)):
            for j in range(self.shape[i]):
                self.neurons[i][j].set_weights(self.layout[i])
                self.neurons[i][j].set_bias()
                self.printAll(i, j)

    def printAll(self, i, j):
        x = self.neurons[i][j].getVar("all")
        y = x.split('/')
        z = y[0].split(' ')
        for l in range(self.layout[i]):
            print("weight " + str(l + 1) + " = " + z[l + 1] + "\n")
        print("b/a : " + y[1])

    def runFF(self, inputs):
        print("output --->" + "\n")
        out = self.feedForward(inputs, 0)
        print(out)

    def test(self):
        input = np.array([0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1])
        self.runFF(input)
</code>
There is a test method, so you can go right ahead and try it if you want to; a sketch of how the class might be driven is shown below.
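To reproduce the issue, the sequence I use is roughly this (the Board argument is only stored in __init__ and is not used by feedForward, so passing None should be enough here):

<code>mlp = MLP(None)          # Board is only stored, not used in the forward pass
mlp.createNeurons()      # allocate the Neuron objects for each layer
mlp.initializeNeurons()  # random weights and biases (also prints them all)
mlp.test()               # runs feedForward on a fixed 27-element input
</code>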
I have spent too much time looking for some sort of mistake, and I am lost. I have looked at other questions similar to mine, but I cannot seem to draw a sensible conclusion from them.
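The closest minimal example I can construct that shows the same symptom is a recursion whose base case returns a value, but where the value of the recursive call itself is never returned, so the outermost caller gets None (this mirrors the structure of my feedForward, if I am reading it correctly):

<code>def countdown(n):
    if n == 0:
        print(n)      # prints 0, just like my last activation prints fine
        return n      # the base case does return a value...
    countdown(n - 1)  # ...but the recursive result is dropped here

print(countdown(3))   # prints 0, then None
</code>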