I’m trying to create a custom loss function that calculates the total amount won based on the predictions, actual outcome of games and the odds for the home team to win.
Currently, I’ve gotten so far as this:
# Custom loss function to minimize money lost
def custom_loss(odds):
def loss_func(y_true, y_pred):
bet_amount = 100 # Constant bet amount (100 euros per game)
# Money lost or gained calculation
# If home team won (y_true == 1) and the prediction was correct
money_lost = tf.where(
y_true == 1, # If home team won
(1 - y_pred) * (odds - 1) * bet_amount, # Profit (if correct prediction)
y_pred * bet_amount # Full loss (if wrong prediction)
)
# Return the average loss
return tf.reduce_mean(money_lost)
return loss_func
def build_model(input_shape_features):
# Input for game features
input_features = Input(shape=input_shape_features, name="game_features")
# Input for odds
input_odds = Input(shape=(1,), name="odds")
# Neural network to process game features
x = layers.Dense(64, activation='relu')(input_features)
x = layers.Dense(32, activation='relu')(x)
output_prob = layers.Dense(1, activation='sigmoid')(x) # Output: probability of home team win
# Add a Lambda layer to pass the odds into the custom loss function
model = Model(inputs=[input_features, input_odds], outputs=output_prob)
# Compile the model using a custom loss function
model.compile(optimizer='adam', loss=custom_loss(odds=input_odds))
return model
# Train the model with two inputs
def train_model(X_train_features, X_train_odds, y_train):
# Build the model
model = build_model(input_shape_features=(X_train_features.shape[1],))
# Train the model
model.fit([X_train_features, X_train_odds], y_train, epochs=10, batch_size=32)
return model
But, if I fit this model, I get the following error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In[66], line 1
----> 1 model = train_model(X_train_features, X_train_odds, y_train)
Cell In[64], line 44
41 model = build_model(input_shape_features=(X_train_features.shape[1],))
43 # Train the model
---> 44 model.fit([X_train_features, X_train_odds], y_train, epochs=10, batch_size=32)
46 return model
File /usr/local/lib/python3.11/site-packages/keras/src/utils/traceback_utils.py:122, in filter_traceback.<locals>.error_handler(*args, **kwargs)
119 filtered_tb = _process_traceback_frames(e.__traceback__)
120 # To get the full stack trace, call:
121 # `keras.config.disable_traceback_filtering()`
--> 122 raise e.with_traceback(filtered_tb) from None
123 finally:
124 del filtered_tb
Cell In[64], line 8
4 bet_amount = 100 # Constant bet amount (100 euros per game)
6 # Money lost or gained calculation
7 # If home team won (y_true == 1) and the prediction was correct
----> 8 money_lost = tf.where(
9 y_true == 1, # If home team won
10 (1 - y_pred) * (odds - 1) * bet_amount, # Profit (if correct prediction)
11 y_pred * bet_amount # Full loss (if wrong prediction)
12 )
14 # Return the average loss
15 return tf.reduce_mean(money_lost)
ValueError: Tried to convert 't' to a tensor and failed. Error: A KerasTensor cannot be used as input to a TensorFlow function. A KerasTensor is a symbolic placeholder for a shape and dtype, used when constructing Keras Functional models or Keras Functions. You can only use it as input to a Keras layer or a Keras operation (from the namespaces `keras.layers` and `keras.operations`). You are likely doing something like:
```
x = Input(...)
...
tf_fn(x) # Invalid.
```
What you should do instead is wrap `tf_fn` in a layer:
```
class MyLayer(Layer):
def call(self, x):
return tf_fn(x)
x = MyLayer()(x)
```
If I understand correctly, it’s not possible to use tf.where()
in the way that I use it right now. But I’m still a bit lost how I can achieve this. Basically, I want to sum all the money won if the prediction is correct and all the losses (-100) if the predictions is incorrect.
Any tips are welcome!
EDIT:
I got this code to work, but unfortunately this does not include the odds in the custom loss function. Any idea how I can add this in?
# Neural network architecture
def build_model(input_shape):
model = Sequential([
layers.Input(shape=input_shape),
layers.Dense(64, activation='relu'),
layers.Dense(32, activation='relu'),
layers.Dense(1, activation='sigmoid') # Single output: probability of home team winning
])
return model
# Custom loss function
def custom_loss(y_true, y_pred):
bet_amount = 100 # Constant bet amount of 100 euros
# In this case, `y_pred` is the predicted probability of the home team winning
# You lose the bet amount if the home team loses
# If the home team wins, you gain the bet amount * (odds - 1)
# Assuming y_pred represents the predicted probability and odds influence the decision to bet
# y_pred will be the predicted probability of winning
money_lost = bet_amount * tf.where(
y_true == 1, # Home team wins
(1 - y_pred), # If predicted probability was wrong, you lose part of the potential gains
y_pred # If home team loses, you lose the entire bet
)
# The total loss is the mean of the money lost
return tf.reduce_mean(money_lost)
# Compile the model with the custom loss
def compile_model(model):
model.compile(optimizer='adam',
loss=custom_loss,
metrics=['accuracy'])
# Example of training the model
def train_model(X_train, y_train):
model = build_model(input_shape=(X_train.shape[1],))
compile_model(model)
# Train the model
model.fit(X_train, y_train, epochs=250, batch_size=32)
return model
The error you’re facing is described in the Keras 3 documentation: Calling TF ops with a KerasTensor:
Using a TF op on a Keras tensor during functional model construction is disallowed: “A KerasTensor cannot be used as input to a TensorFlow function”.
The following snippet of code will reproduce the error:
input = keras.layers.Input([2, 2, 1]) tf.squeeze(input)
How to fix it: use an equivalent op from
keras.ops
.input = keras.layers.Input([2, 2, 1]) keras.ops.squeeze(input)
In your case, you should be able to transparently use keras.ops.where
instead of tf.where
, and, similarly, keras.ops.mean
instead of tf.reduce_mean
.
1