I have this neural network (written with KotlinDL):
class ValueNetwork(inputShape: Int, actionSpaceSize: Int) {

    private val model: Sequential

    init {
        model = Sequential.of(
            Input(inputShape.toLong()),
            Dense(128, Activations.Relu),
            Dense(128, Activations.Relu),
            Dense(1, Activations.Linear),
        )
        model.use {
            it.compile(
                optimizer = Adam(),
                loss = Losses.MSE,
                metric = Metrics.MAE
            )
        }
    }

    fun predictValue(state: State): Float {
    }

    fun update(trajectories: List<Trajectory>, discountedReturns: List<Float>, learningRate: Float) {
        model.use {
            it.compile(optimizer = Adam(learningRate), loss = Losses.MSE, metric = Metrics.MAE)
            it.fit(dataset, epochs = 10, batchSize = 64)
        }
    }

    fun saveModel(filePath: String) {
    }

    fun loadModel(filePath: String) {
    }
}
How can I save this network after it has been trained, and how can I reload the trained network after initialization?
I tried the save() and loadWeights() functions. save() apparently writes the model as a .pb or .json file (depending on the configuration) and the variables to .txt files. loadWeights() is obviously the wrong function for this task; however, I cannot find the counterpart to save().
fun saveModel(filePath: String) {
    model.save(File(filePath), writingMode = WritingMode.OVERRIDE)
}

fun loadModel(filePath: String) {
    val file = File(filePath)
    if (file.exists()) {
        model.loadWeights(file)
    }
}
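My current guess, based on the save/load examples in the KotlinDL docs, is that the counterpart is to rebuild the architecture from the saved JSON config and then load the variables after compiling. I'm not sure the calls below are the right ones (Sequential.loadDefaultModelConfiguration() and loadWeights(File) are my assumptions from the docs), and model would have to become a var for the reassignment to work:

fun loadModel(filePath: String) {
    val modelDirectory = File(filePath)
    if (modelDirectory.exists()) {
        // Assumption: the model was saved with a JSON config, so the directory
        // contains modelConfig.json plus the variable .txt files.
        val restored = Sequential.loadDefaultModelConfiguration(modelDirectory)
        restored.compile(optimizer = Adam(), loss = Losses.MSE, metric = Metrics.MAE)
        // Assumption: loadWeights(File) reads the variables written by save().
        restored.loadWeights(modelDirectory)
        model = restored // only works if `model` is declared as var
    }
}

If that is wrong, the only other candidate I found is TensorFlowInferenceModel.load(File(filePath)), but that returns an inference-only model rather than a Sequential I can keep training.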
I want to use these functions in the learning process of a PPO algorithm:
fun train(maxIterations: Int, initialEnv: DiscreteSchedulingEnv) {
    val env = initialEnv.deepCopy()
    for (iteration in 1..maxIterations) {
        val trajectories = collectTrajectories(env)
        updatePolicyAndValueNetworks(trajectories)
        env.reset()
    }
    policyNetwork.saveModel(policyNetworkPath)
    valueNetwork.saveModel(valueNetworkPath)
}

fun getBestAction(env: DiscreteSchedulingEnv): Action {
    policyNetwork.loadModel(policyNetworkPath)
    valueNetwork.loadModel(valueNetworkPath)
    return policyNetwork.getBestAction(env.currentState).toActionMap()
}