In the terminal the ai can respond with me with text but overall this code is also suppose to make the ai talk with whatever she reponds, but there is no sound or anything coming from the ai
i dont know if i really need silero tts because i took it out of the code since i couldnt figure out why it was not working to begin with. I dont think that i need since i already have voicevox with dockor desktop running. i do have a output.wave in .VSCODE. files for the code
this is my code (i took my api key out on purpose so that is not the problem)
I put my code so yall could run it and tell me a solution to my problem thank you.(copied code)
import os
import torchaudio
import urllib
import urllib.parse
import requests
import openai
import winsound
import sys
import pytchat
import time
import re
import pyaudio
import keyboard
import wave
import threading
import socket
import utils
import python_utils
from silero import silero_tts
from config import *
from katakana import*
from translate import*
from emoji import demojize
from config import *
# to help the CLI write unicode characters to the terminal
sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf8', buffering=1)
# use your own API Key, you can get it from https://openai.com/. I place my API Key in a separate file called config.py
openai.api_key =
conversation = []
# Create a dictionary to hold the message data
history = {"history": conversation}
mode = 0
total_characters = 0
chat = ""
chat_now = ""
chat_prev = ""
is_Speaking = False
owner_name = "Ardha"
blacklist = ["Nightbot", "streamelements"]
# function to get the user's input audio
def record_audio():
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
WAVE_OUTPUT_FILENAME = "input.wav"
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
frames = []
print("Recording...")
while keyboard.is_pressed('RIGHT_SHIFT'):
data = stream.read(CHUNK)
frames.append(data)
print("Stopped recording.")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
transcribe_audio("input.wav")
# function to transcribe the user's audio
def transcribe_audio(file):
global chat_now
audio_file= open(file, "rb")
# Translating the audio to English
# transcript = openai.Audio.translate("whisper-1", audio_file)
# Transcribe the audio to detected language
transcript = openai.Audio.transcribe("whisper-1", audio_file)
chat_now = transcript.text
print ("Question: " + chat_now)
result = owner_name + " said " + chat_now
conversation.append({'role': 'user', 'content': result})
openai_answer()
# function to get an answer from OpenAI
def openai_answer():
global total_characters, conversation
total_characters = sum(len(d['content']) for d in conversation)
while total_characters > 4000:
try:
# print(total_characters)
# print(len(conversation))
conversation.pop(2)
total_characters = sum(len(d['content']) for d in conversation)
except Exception as e:
print("Error removing old messages: {0}".format(e))
with open("conversation.json", "w", encoding="utf-8") as f:
# Write the message data to the file in JSON format
json.dump(history, f, indent=4)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=conversation,
max_tokens=128,
temperature=1,
top_p=0.9
)
message = response['choices'][0]['message']['content']
conversation.append({'role': 'assistant', 'content': message})
translate_text(message)
# function to capture livechat from youtube
def yt_livechat(video_id):
global chat
live = pytchat.create(video_id=video_id)
while live.is_alive():
# while True:
try:
for c in live.get().sync_items():
# Ignore chat from the streamer and Nightbot, change this if you want to include the streamer's chat
if c.author.name in blacklist:
continue
# if not c.message.startswith("!") and c.message.startswith('#'):
if not c.message.startswith("!"):
# Remove emojis from the chat
chat_raw = re.sub(r':[^s]+:', '', c.message)
chat_raw = chat_raw.replace('#', '')
# chat_author makes the chat look like this: "Nightbot: Hello". So the assistant can respond to the user's name
chat = c.author.name + ' berkata ' + chat_raw
print(chat)
time.sleep(1)
except Exception as e:
print("Error receiving chat: {0}".format(e))
def twitch_livechat():
global chat
sock = socket.socket()
sock.connect((server, port))
sock.send(f"PASS {token}n".encode('utf-8'))
sock.send(f"NICK {nickname}n".encode('utf-8'))
sock.send(f"JOIN {channel}n".encode('utf-8'))
regex = r":(w+)!w+@w+.tmi.twitch.tv PRIVMSG #w+ :(.+)"
while True:
try:
resp = sock.recv(2048).decode('utf-8')
if resp.startswith('PING'):
sock.send("PONGn".encode('utf-8'))
elif not user in resp:
resp = demojize(resp)
match = re.match(regex, resp)
username = match.group(1)
message = match.group(2)
if username in blacklist:
continue
chat = username + ' said ' + message
print(chat)
except Exception as e:
print("Error receiving chat: {0}".format(e))
# translating is optional
def translate_text(text):
global is_Speaking
# subtitle will act as subtitle for the viewer
result_id = translate_google(text,"EN", "EN")
print("ID Answer: " + result_id)
# subtitle = translate_google(text, "ID")
# tts will be the string to be converted to audio
detect = detect_google(text)
tts = translate_google(text, f"{detect}", "JA")
# tts = translate_deeplx(text, f"{detect}", "JA")
tts_en = translate_google(text, f"{detect}", "EN")
try:
# print("ID Answer: " + subtitle)
print("JP Answer: " + tts)
print("EN Answer: " + tts_en)
except Exception as e:
print("Error printing text: {0}".format(e))
return
# Choose between the available TTS engines
# Japanese TTS
# voicevox_tts(tts)
def speech_text(result_jp, result_id):
# You need to run VoicevoxEngine.exe first before running this script
global is_Speaking
voicevox_url = 'http://localhost:50021'
katakana_text = katakana_converter
programs_encoded = urllib.parse.urlencode({'text': katakana_text, 'speaker': 46})
request = requests.post(f'http://127.0.0.1:/audio_query?{params_encoded}')
params_encoded = urllib.parse.urlencode({'speaker': 46, 'enable_interrogative_unspeak': True})
request = requests.post(f'http://127.0.0.1:50021/synthesis?{params_encoded}', json=request.json())
with open("output.wav", "wb") as outfile:
outfile.write(requests.content)
# output.txt will be used to display the subtitle on OBS
with open("output.txt", "w", encoding="utf-8") as outfile:
try:
text = result_id
words = text.split()
lines = [words[i:i+10] for i in range(0, len(words), 10)]
for line in lines:
outfile.write(" ".join(line) + "n")
except:
print("Error writing to output.txt")
# chat.txt will be used to display the chat/question on OBS
with open("chat.txt", "w", encoding="utf-8") as outfile:
try:
words = chat_now.split()
lines = [words[i:i+10] for i in range(0, len(words), 10)]
for line in lines:
outfile.write(" ".join(line) + "n")
except:
print("Error writing to chat.txt")
time.sleep(1)
# is_Speaking is used to prevent the assistant speaking more than one audio at a time
is_Speaking = True
winsound.PlaySound("output.wav", winsound.SND_FILENAME)
is_Speaking = False
# Clear the text files after the assistant has finished speaking
time.sleep(1)
with open ("output.txt", "w") as f:
f.truncate(0)
with open ("chat.txt", "w") as f:
f.truncate(0)
def preparation():
global conversation, chat_now, chat, chat_prev
while True:
# If the assistant is not speaking, and the chat is not empty, and the chat is not the same as the previous chat
# then the assistant will answer the chat
chat_now = chat
if is_Speaking == False and chat_now != chat_prev:
# Saving chat history
conversation.append({'role': 'user', 'content': chat_now})
chat_prev = chat_now
openai_answer()
time.sleep(1)
if __name__ == "__main__":
try:
# You can change the mode to 1 if you want to record audio from your microphone
# or change the mode to 2 if you want to capture livechat from youtube
mode = input("Mode (1-Mic, 2-Youtube Live, 3-Twitch Live): ")
if mode == "1":
print("Press and Hold Right Shift to record audio")
while True:
if keyboard.is_pressed('RIGHT_SHIFT'):
record_audio()
elif mode == "2":
live_id = input("Livestream ID: ")
# Threading is used to capture livechat and answer the chat at the same time
t = threading.Thread(target=preparation)
t.start()
yt_livechat(live_id)
elif mode == "3":
# Threading is used to capture livechat and answer the chat at the same time
print("To use this mode, make sure to change utils/twitch_config.py to your own config")
t = threading.Thread(target=preparation)
t.start()
twitch_livechat()
except KeyboardInterrupt:
t.join()
print("Stopped")
i looked for vidoes that could make the tts for the ai but there is no videos related to that
Israel Barrios is a new contributor to this site. Take care in asking for clarification, commenting, and answering.
Check out our Code of Conduct.