I run this code on Windows 10 with TensorFlow 2.10 and a GeForce GTX 1080 GPU. The same code runs at least 90 times faster in Colab, and GPU usage on my PC never exceeds about 3%.
import sklearn
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras import optimizers
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from sklearn.model_selection import KFold
from sklearn.metrics import coverage_error, label_ranking_loss, label_ranking_average_precision_score
from bpmll.bpmll import bp_mll_loss

all_history_max = {}
num_folds = 10

# Multi-label metrics computed with scikit-learn; they call .numpy(), so they only work on eager tensors.
def coverage(Y_train, y_score):
    err = coverage_error(Y_train.numpy(), y_score.numpy())
    return tf.convert_to_tensor(err)

def ranking_loss(Y_train, y_score):
    err1 = label_ranking_loss(Y_train.numpy(), y_score.numpy())
    return tf.convert_to_tensor(err1)

def average_precision(Y_train, y_score):
    err2 = label_ranking_average_precision_score(Y_train.numpy(), y_score.numpy())
    return tf.convert_to_tensor(err2)

if __name__ == '__main__':
    inputs = np.load('C:/Users/User/encoded_matrix_60_max.npy')
    targets = np.load('C:/Users/User/labels_nozerogene.npy')
    dim_no = inputs.shape[1]
    class_no = targets.shape[1]
    hidden_neurons = int(0.2 * 60)  # Dense expects an integer number of units

    kfold = KFold(n_splits=num_folds, shuffle=True)
    fold_no = 1
    for train, test in kfold.split(inputs, targets):
        model = Sequential()
        model.add(Dense(hidden_neurons, input_dim=dim_no, activation='relu'))
        model.add(Dense(class_no, activation='sigmoid'))
        adam = keras.optimizers.Adam(learning_rate=0.05)
        model.compile(loss=bp_mll_loss, optimizer=adam,
                      metrics=[coverage, ranking_loss, average_precision])
        # forces eager execution so the sklearn-based metrics can call .numpy()
        tf.config.run_functions_eagerly(True)
        tf.data.experimental.enable_debug_mode()
        history = model.fit(inputs[train], tf.cast(targets[train], tf.float32),
                            shuffle=True, batch_size=10, epochs=100)
        scores = model.evaluate(inputs[test], targets[test], verbose=0)
        fold_no = fold_no + 1
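For reference, since the custom metrics are the only part that needs sklearn and .numpy(), here is a minimal sketch of how one of them could be wrapped in tf.py_function so that eager execution would not have to be forced globally. This is only a sketch of an approach I have not verified; the helper names coverage_pyfn and _np_coverage are made up for illustration:

import numpy as np
import tensorflow as tf
from sklearn.metrics import coverage_error

def coverage_pyfn(y_true, y_pred):
    # the inner function runs eagerly inside tf.py_function, so .numpy() is available there
    def _np_coverage(y_t, y_p):
        return np.float32(coverage_error(y_t.numpy(), y_p.numpy()))
    return tf.py_function(_np_coverage, inp=[y_true, y_pred], Tout=tf.float32)

# hypothetical usage: model.compile(loss=bp_mll_loss, optimizer=adam, metrics=[coverage_pyfn])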
I also get these warnings:
I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX AVX2
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-06-08 14:33:09.992440: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1616] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 6604 MB memory: -> device: 0, name: NVIDIA GeForce GTX 1080, pci bus id: 0000:01:00.0, compute capability: 6.1
Epoch 1/100
WARNING:tensorflow:AutoGraph could not transform <function validate_parameter_constraints at 0x0000021736169A60> and will run it as-is.
Cause: for/else statement not yet supported
To silence this warning, decorate the function with @tf.autograph.experimental.do_not_convert
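Following the warning's own suggestion, decorating the metric that triggers it would look like the sketch below (I am not sure it actually silences the warning in this setup):

import tensorflow as tf
from sklearn.metrics import coverage_error

@tf.autograph.experimental.do_not_convert  # stop AutoGraph from trying to transform the sklearn call chain
def coverage(Y_train, y_score):
    err = coverage_error(Y_train.numpy(), y_score.numpy())
    return tf.convert_to_tensor(err)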
When I run this command to check whether the GPU is recognized by TensorFlow:
python -c "import tensorflow as tf; print(tf.config.list_physical_devices('GPU'))"
I get this result:
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
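To also check that operations are actually placed on the GPU (not just that the device is visible), device placement logging can be turned on; a minimal sketch:

import tensorflow as tf

tf.debugging.set_log_device_placement(True)  # log the device every op is assigned to

a = tf.random.uniform((1000, 1000))
b = tf.random.uniform((1000, 1000))
c = tf.matmul(a, b)
print(c.device)  # expected to end with 'GPU:0' when the GPU is used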