<code>
import numpy as np
import tensorflow as tf

def Mont(X, Y):
    # Monte Carlo approximation of the integral term:
    # (betta[0] / n) * sum_i k(X, Y)[i] * NN(X)
    summ = []
    for i in range(n):
        summ.append(tf.cast(k(X, Y)[i], tf.float32) * NN(X)[0][0])
    # Sum once, after the loop, instead of appending a partial sum on
    # every iteration; return the tensor itself, not a one-element list
    return (betta[0] / n) * tf.math.reduce_sum(summ, axis=0)

def loss():
    # Mean squared residual of the equation: NN(X) + integral term = g(X)
    g1 = np.asarray(g(X), dtype='float32')
    return tf.reduce_mean(tf.square(NN(X)[0][0][:n] + Mont(X, Y) - g1))

loss()  # one evaluation before training, as a sanity check

def train():
    # One optimizer step over all network parameters
    with tf.GradientTape() as tape:
        loss_f = loss()
    trainable_variables = list(weights.values()) + list(biases.values())
    gradients = tape.gradient(loss_f, trainable_variables)
    optimizer.apply_gradients(zip(gradients, trainable_variables))

l1 = []  # loss history, sampled every 10 iterations
l3 = []  # squared error of the network output against the constant 1
for i in range(2000):
    train()
    if i % 10 == 0:
        l1.append(loss())
        l3.append(tf.reduce_mean(tf.square(tf.ones(n) - NN(X)[0][0][:n])))
        print('loss', i, loss())
        print(tf.reduce_mean(tf.square(tf.ones(n) - NN(X)[0][0][:n])))

l2 = tf.convert_to_tensor(l1)
l4 = tf.convert_to_tensor(l3)
</code>
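If k(X, Y) can be materialized as a single (n, n) tensor, the per-sample Python loop in Mont can be collapsed into one tensor operation. A minimal sketch, assuming tf.cast(k(X, Y), tf.float32) has shape (n, n) and NN(X)[0][0] has shape (n,); all names come from the code above, only Mont_vectorized is introduced here:

<code>
def Mont_vectorized(X, Y):
    # Sketch only: assumes k(X, Y) casts to an (n, n) float tensor and
    # NN(X)[0][0] is a length-n vector, as the loop version implies.
    K = tf.cast(k(X, Y), tf.float32)
    u = NN(X)[0][0]
    # K * u broadcasts u over the rows of K; summing over axis 0
    # reproduces the loop's sum_i k(X, Y)[i] * u in one operation.
    return (betta[0] / n) * tf.reduce_sum(K * u, axis=0)
</code>

Besides being faster, the vectorized form avoids building a Python list of n intermediate tensors on every loss evaluation.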
At some point the loss gets stuck at a fixed value and training stops making progress. The Monte Carlo approximation, the loss function, and the training step are shown in the code above. I am not sure that the above functions are correct…
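One way to check whether the plateau comes from vanishing gradients rather than from the loss formulation is to print the per-variable gradient norms for a single step. A minimal diagnostic sketch reusing weights, biases, and loss from the code above; grad and norm are illustrative local names:

<code>
# Diagnostic sketch: if a gradient is None or its norm is ~0, the
# corresponding variable receives no useful learning signal, which
# would explain a loss value that stops moving.
trainable_variables = list(weights.values()) + list(biases.values())
with tf.GradientTape() as tape:
    loss_f = loss()
gradients = tape.gradient(loss_f, trainable_variables)
for v, grad in zip(trainable_variables, gradients):
    norm = None if grad is None else tf.norm(grad).numpy()
    print(v.name, 'gradient norm:', norm)
</code>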