I currently have this code running:
import multiprocessing
import os

import pandas as pd

def single_iteration(iter: int):
    some_parameters = 100
    data1, data2, data3, data4 = do_something()  # pseudocode for the actual computation
    results = []
    for i in range(100):
        data = {'Col1': data1, 'Col2': data2, 'Col3': data3, 'Col4': data4}
        results.append(data)
    df = pd.DataFrame(results)
    return df

if __name__ == "__main__":
    run_stop = 100
    number_of_cores = int(os.environ['SLURM_CPUS_PER_TASK'])
    with multiprocessing.Pool(number_of_cores) as pool:
        results = pool.map(single_iteration, range(run_stop))
    df = pd.concat(results, ignore_index=True)
    df.to_csv(os.path.join(path, file_name))
However, I now want two different CSVs: something like df1 = pd.DataFrame({'Col1': data1, 'Col2': data2}) and df2 = pd.DataFrame({'Col3': data3, 'Col4': data4}), returned from each run, then concatenated separately and saved.
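
One way to do this, kept as a sketch since do_something() and path are placeholders from the code above: have single_iteration return a tuple (df1, df2), then split the list that pool.map returns with zip(*results) and concatenate each half separately. The output file names df1.csv and df2.csv are assumptions.

import multiprocessing
import os

import pandas as pd

def single_iteration(iter: int):
    some_parameters = 100
    data1, data2, data3, data4 = do_something()  # pseudocode, as above
    rows1, rows2 = [], []
    for i in range(100):
        rows1.append({'Col1': data1, 'Col2': data2})
        rows2.append({'Col3': data3, 'Col4': data4})
    # return both frames as a tuple
    return pd.DataFrame(rows1), pd.DataFrame(rows2)

if __name__ == "__main__":
    run_stop = 100
    number_of_cores = int(os.environ['SLURM_CPUS_PER_TASK'])
    with multiprocessing.Pool(number_of_cores) as pool:
        results = pool.map(single_iteration, range(run_stop))
    # results is a list of (df1, df2) tuples; zip(*results) splits it
    # into two sequences of frames that can be concatenated separately
    dfs1, dfs2 = zip(*results)
    pd.concat(dfs1, ignore_index=True).to_csv(os.path.join(path, 'df1.csv'))  # assumed name
    pd.concat(dfs2, ignore_index=True).to_csv(os.path.join(path, 'df2.csv'))  # assumed name

Since pool.map preserves input order, the rows in the two concatenated frames stay aligned run for run, so this should slot into the existing SLURM setup without other changes.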