def blend(s1_num, ind_f, ind_t, connection):
    """Blend the word vectors of s1 and s2 over indices [ind_f, ind_t).

    For each index i, forms the weighted sum
        s1_num * vector(s1[i]) + (1 - s1_num) * vector(s2[i]),
    looks up the 16 most similar vocabulary vectors, and keeps the
    16th match (index 15), lower-cased. The resulting list of words
    is sent back through `connection` (a multiprocessing Pipe end).

    NOTE(review): relies on module-level globals `np`, `nlp`, `s1`,
    and `s2` being defined before the worker process starts — on
    platforms that spawn (Windows/macOS) rather than fork, these must
    be created at import time or the worker will fail.
    """
    s2_num = 1.0 - s1_num
    blended_array = []
    for i in range(ind_f, ind_t):
        # Weighted blend of the two words' 300-d vectors.
        p = np.array(nlp.vocab[s1[i]].vector) * s1_num
        o = np.array(nlp.vocab[s2[i]].vector) * s2_num
        blended_vec = p + o
        # most_similar expects a batch; take the single row of keys back out.
        ms = nlp.vocab.vectors.most_similar(np.asarray([blended_vec]), n=16)
        words = [nlp.vocab.strings[w] for w in ms[0][0]]
        # append() instead of list-concat: the original rebuilt the list
        # on every iteration (quadratic). Index 15 = the 16th-best match,
        # preserved from the original behavior.
        blended_array.append(words[15].lower())
    connection.send(blended_array)
if __name__ == '__main__':
    # BUG FIX (this is why it ran as slow as sequential): the original
    # called conn1.recv() immediately after each process.start(). recv()
    # blocks until that worker sends its result, so worker N+1 was never
    # started until worker N had completely finished — the four processes
    # ran one after another. The fix is to start ALL workers first, and
    # only then receive.
    #
    # One Pipe per worker: the original shared a single pipe end among
    # all four workers, so once they run concurrently their results
    # would arrive in completion order, scrambling the chunks. A
    # dedicated pipe per worker lets us receive in index order.
    chunk_bounds = [(0, 50), (50, 100), (100, 150), (150, 218)]
    pipes = [multiprocessing.Pipe() for _ in chunk_bounds]

    processes = [
        Process(target=blend, args=(0.25, start, stop, child_conn))
        for (start, stop), (_, child_conn) in zip(chunk_bounds, pipes)
    ]
    for proc in processes:
        proc.start()

    # Receive each chunk in order, then reap the workers.
    modified_array = []
    for parent_conn, _ in pipes:
        modified_array += parent_conn.recv()
    for proc in processes:
        proc.join()

    print_arrayed_text(modified_array)
I am writing a program that does some spaCy processing on a text; it involves 300-dimensional vectors over an array of length 218. I'm splitting the array into 4 parts and trying to process each part in parallel, then joining the results afterward. Any idea why it's as slow as the sequential version?