I'm making a voice assistant, and I want it to run in parallel with an open cv2 graphics window. Right now my assistant only works after the window is closed.
import queue
import sounddevice as sd
import vosk
from vosk import Model, KaldiRecognizer
import json
import cv2
import words
from skills import *
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
q = queue.Queue()
model = vosk.Model('vosk-model-small-ru-0.22')
device = sd.default.device
samplerate = int(sd.query_devices(device[0], 'input')['default_samplerate'])
def callback(indata, frames, time, status):
    q.put(bytes(indata))
def recognize(data, vectorizer, clf):
    trg = words.TRIGGERS.intersection(data.split())
    if not trg:
        return
    data = data.replace(list(trg)[0], '')  # str.replace returns a new string, so reassign it
    text_vector = vectorizer.transform([data]).toarray()[0]
    answer = clf.predict([text_vector])[0]
    func_name = answer.split()[0]
    speaker(answer.replace(func_name, ''))
    exec(func_name + '()')
def main():
    vectorizer = CountVectorizer()
    vectors = vectorizer.fit_transform(list(words.data_set.keys()))
    clf = LogisticRegression()
    clf.fit(vectors, list(words.data_set.values()))
    del words.data_set
    with sd.RawInputStream(samplerate=samplerate, blocksize=16000, device=device[0],
                           dtype="int16", channels=1, callback=callback):
        rec = KaldiRecognizer(model, samplerate)
        while True:
            data = q.get()
            if rec.AcceptWaveform(data):
                image = cv2.imread('1.png')        ########
                cv2.imshow('image window', image)  #####
                cv2.waitKey(0)                     #####
                cv2.destroyAllWindows()            ##
                data = json.loads(rec.Result())['text']
                recognize(data, vectorizer, clf)
if __name__ == '__main__':
    main()
The block of code I highlighted with ### is meant to display the cv2 window. Can anyone give me ideas on how the voice assistant could work simultaneously with the cv2 graphics window? Thank you in advance for your help <3
I expect my voice assistant to be able to talk to me while the cv2 graphics window is open.
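For reference, here is a rough, untested sketch of one idea I've been toying with: replace the blocking cv2.waitKey(0) with cv2.waitKey(1) so the window's event loop gets pumped on every pass, and read from the audio queue with a short timeout so the loop never stalls for long. This would replace the while True loop inside main() and reuses rec, q, recognize, etc. from above; the main_loop name and the 0.1-second timeout are just placeholders I made up, and I don't know if this is the right approach:

def main_loop(rec, q, vectorizer, clf):
    # Sketch: show the window once before the loop, then keep it responsive
    # by calling cv2.waitKey(1) on every iteration instead of blocking in waitKey(0).
    image = cv2.imread('1.png')
    cv2.imshow('image window', image)
    while True:
        cv2.waitKey(1)                   # process GUI events, returns after ~1 ms
        try:
            data = q.get(timeout=0.1)    # short timeout so the window never freezes on audio
        except queue.Empty:
            continue
        if rec.AcceptWaveform(data):
            text = json.loads(rec.Result())['text']
            recognize(text, vectorizer, clf)

Another idea I've read about but haven't tried is to run the Vosk recognition loop in a background thread and keep all of the cv2.imshow / cv2.waitKey calls in the main thread, since some platforms don't like GUI calls from worker threads. Would either of these directions work?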