I’m struggling to understand my own code.
With this Y array:
<code>y = np.array([
[0.6, 0.7, 0.8, 0.9, 1.0, 0.5],
[0.5, 0.4, 0.3, 0.2, 0.9, 0.5],
[0.1, 0.3, 0.5, 0.7, 0.9, 0.5],
[0.2, 0.4, 0.6, 0.8, 1.0, 0.5],
[0.0, 0.9, 0.1, 0.8, 0.0, 0.5],
[0.9, 0.0, 0.6, 0.9, 0.0, 0.5]
])
</code>
And these X inputs:
<code>temperature = np.array([0.2, 0.4, 0.6, 0.8, 0.7, 1.0])
humidity = np.array([0.3, 0.5, 0.7, 0.9, 0.6, 1.0])
precipitation = np.array([0.1, 0.2, 0.4, 0.5, 0.3, 1.0])
wind_speed = np.array([0.4, 0.6, 0.8, 0.9, 0.5, 1.0])
solar_radiation = np.array([0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
pressure = np.array([0.3, 0.4, 0.5, 0.6, 0.2, 1.0])
</code>
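Each array holds one feature across the six samples. Stacked and transposed, as apply_grsba (shown at the end) does internally, they form a (6, 6) matrix whose columns map to u, v, w, r, s, a in the model. A quick shape check, as a sketch:
<code>import numpy as np

# Rows = samples, columns = features (u, v, w, r, s, a)
X = np.array([temperature, humidity, precipitation,
              wind_speed, solar_radiation, pressure]).T
assert X.shape == (6, 6)
assert y.shape[0] == X.shape[0]  # one target row per sample
</code>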
Running the code in sequence just now, I got this output:
<code>[[0 2 3 4 5 1]
[1 2 3 4 5 0]
[1 2 3 4 5 0]
[1 2 3 4 5 0]
[1 2 3 5 4 0]
[1 2 3 4 5 0]]
[[2 4 1 5 0 3]
[3 1 2 4 5 0]
[3 1 2 4 5 0]
[3 1 2 4 5 0]
[3 1 4 5 2 0]
[1 2 4 5 0 3]]
[[5 3 0 2 1 4]
[1 5 0 3 2 4]
[4 1 5 0 2 3]
[1 5 0 3 2 4]
[0 1 4 5 3 2]
[1 5 0 4 2 3]]
[[1 2 3 4 5 0]
[1 2 3 5 4 0]
[1 2 3 5 4 0]
[1 2 3 5 4 0]
[1 2 3 4 5 0]
[1 2 3 5 4 0]]
</code>
But this evening, after running the code several times, I got a very different output. It was identical on every run, even when I changed X and Y: it looked like the last matrix printed above, but with all columns equal.
I suspect this is not normal, but I’m asking here because I could not understand what was happening today.
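To make "all columns equal" concrete, this is the kind of check I can run on the raw predictions (a sketch; result stands for whatever model.predict returns):
<code>import numpy as np

# If every row of the predictions is (nearly) identical, any per-row
# ranking of the columns collapses to the same permutation, which
# matches what I saw this evening.
def looks_collapsed(result, tol=1e-6):
    return bool(np.all(np.abs(result - result[0]) < tol))

# Also worth checking: NaN/Inf predictions sort the same way every time.
def has_bad_values(result):
    return bool(np.any(~np.isfinite(result)))
</code>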
And this is the code I was running:
<code>
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Lambda
from tensorflow.keras.models import Model

def custom_output_layer(self, inputs, depth=0):
    if depth >= self.max_depth:
        # Base case: zeros matching the output size
        return tf.zeros_like(inputs[:, :self.y_shape])
    u, v, w, r, s, a = (inputs[:, 0], inputs[:, 1], inputs[:, 2],
                        inputs[:, 3], inputs[:, 4], inputs[:, 5])
    c = 1.0  # Assuming c is 1.0
    epsilon = 1e-9  # Small constant to prevent division by zero

    # First part of the formula
    term1_numerator = v + (u - v) / (1 - (u * v) / (c ** 2))
    term1_denominator = 1 + (v * (u * v) / (1 - (u * v) / (c ** 2))) / (c ** 2)
    term1 = term1_numerator / (term1_denominator + epsilon)  # Avoid division by zero

    # Second part of the formula
    term2_numerator = 4 * w * r
    term2_denominator = np.pi * tf.sqrt(1 - (w * r) ** 2 / (c ** 2))
    term2 = term2_numerator / (term2_denominator + epsilon)  # Avoid division by zero

    # Apply the inverse limit of "a"
    term_limit = tf.pow(a + epsilon, -1)  # Add epsilon to prevent division by zero
    result = tf.pow((term1 + term2) * term_limit, self.t)  # Use the generic t

    # Recursive call to further bifurcate the space
    new_inputs = tf.stack([u, v, w, r, s, a], axis=1)
    recursive_result = self.custom_output_layer(new_inputs, depth + 1)

    # Ensure the recursive result has the correct shape with modular adjustments
    recursive_result = tf.concat([
        tf.expand_dims(self.mod(recursive_result[:, 0] + a, 1.0), axis=1),
        tf.expand_dims(self.mod(recursive_result[:, 1] + s, 1.0), axis=1),
        tf.expand_dims(self.mod(recursive_result[:, 2] + r, 1.0), axis=1),
        tf.expand_dims(self.mod(recursive_result[:, 3] + w, 1.0), axis=1),
        tf.expand_dims(self.mod(recursive_result[:, 4] + v, 1.0), axis=1),
        tf.expand_dims(self.mod(recursive_result[:, 5] + u, 1.0), axis=1)
    ], axis=1)

    # Combine current result with recursive result
    combined_result = self.mod(result + recursive_result[:, 0], 1.0)

    # The output coordinates (x1, y1, z1, x2, y2, z2) are based on the combined result
    x1 = combined_result + a
    y1 = self.mod(combined_result + s, 1.0)
    z1 = self.mod(combined_result + r, 1.0)
    x2 = self.mod(combined_result + w, 1.0)
    y2 = self.mod(combined_result + v, 1.0)
    z2 = self.mod(combined_result + u, 1.0)
    return tf.stack([x1, y1, z1, x2, y2, z2], axis=1)

def create_model(self):
    input_layer = Input(shape=(self.num_features,))  # Six inputs: u, v, w, r, s, a
    hidden_layer = Dense(self.units, activation=self.activation)(input_layer)
    hidden_layer = Dense(self.units, activation=self.activation)(hidden_layer)
    output_layer = Lambda(lambda x: self.custom_output_layer(x),
                          output_shape=(self.y_shape,))(hidden_layer)
    model = Model(inputs=input_layer, outputs=output_layer)
    model.compile(optimizer=self.optimizer, loss=self.loss)
    return model
</code>
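For anyone trying to reproduce this: both methods above live inside my GRSBA class, which also defines the attributes and the mod helper they use. Here is a minimal sketch of that surrounding class; the attribute values are hypothetical placeholders, not necessarily my exact configuration:
<code>import tensorflow as tf

class GRSBA:
    """Minimal skeleton of the class the two methods above belong to."""
    def __init__(self, num_features, y, t=1.0):
        self.num_features = num_features
        self.y_shape = y.shape[1]   # six output coordinates
        self.t = t                  # generic exponent used in the formula
        self.max_depth = 3          # hypothetical recursion cutoff
        self.units = 64             # hypothetical hidden width
        self.activation = "relu"    # hypothetical
        self.optimizer = "adam"     # hypothetical
        self.loss = "mse"           # hypothetical
        self.model = self.create_model()  # create_model as shown above

    def mod(self, x, m):
        # Elementwise modulo used for the wrap-around adjustments
        return tf.math.floormod(x, m)
</code>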
I was trying to model an accelerated, inward-spinning torus on my GPU, and I did get an output, but I didn’t expect to find this weirdness.
This is the function I used to run the code:
<code>
import numpy as np
import gr  # my module that defines the GRSBA class

def apply_grsba(inputs, y, t=1.0):
    # Raise each feature array to the power t, then transpose so that
    # rows are samples and columns are features
    processed_inputs = [input_data ** t for input_data in inputs]
    processed_inputs = np.array(processed_inputs).T
    num_features = processed_inputs.shape[1]
    num_samples = processed_inputs.shape[0]
    if y.shape[0] != num_samples:
        raise ValueError(f"Shape mismatch: y should have {num_samples} rows "
                         f"but has {y.shape[0]}")
    # Note: t is passed as a fixed 1.0 here, not the function's t argument
    model = gr.GRSBA(num_features=num_features, y=y, t=1.0)
    model.train(processed_inputs, y)
    result = model.predict(processed_inputs)
    return result
</code>
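And this is roughly how I call it with the arrays above (a minimal sketch of my call site):
<code># The six feature arrays and y are defined above
inputs = [temperature, humidity, precipitation, wind_speed,
          solar_radiation, pressure]
result = apply_grsba(inputs, y, t=1.0)
print(result)
</code>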