I am using the Google Gemini API to upload and analyze videos. The code works, but it’s quite slow. It takes about 1.3 minutes to process a 2-minute video. I would like to understand why this is happening and how I can optimize my code to reduce the processing time.
Here is my code:
import os
import time

import google.generativeai as genai


def upload_to_gemini(path, mime_type=None):
    # Upload a local file to the Gemini Files API and return the file handle.
    file = genai.upload_file(path, mime_type=mime_type)
    print(f"Uploaded file '{file.display_name}' as: {file.uri}")
    return file


def wait_for_files_active(files):
    # Poll each uploaded file until it leaves the PROCESSING state,
    # checking every 10 seconds.
    print("Waiting for file processing...")
    for name in (file.name for file in files):
        file = genai.get_file(name)
        while file.state.name == "PROCESSING":
            print(".", end="", flush=True)
            time.sleep(10)
            file = genai.get_file(name)
        if file.state.name != "ACTIVE":
            raise Exception(f"File {file.name} failed to process")
    print("...all files ready")
    print()


def get_prompt(language):
    # Load the prompt text for the requested language from a file
    # stored next to this script.
    base_path = os.path.dirname(os.path.abspath(__file__))
    prompt_file = 'prompt1g.txt' if language == 'en' else 'prompt2g.txt'
    prompt_path = os.path.join(base_path, prompt_file)
    with open(prompt_path, 'r', encoding="utf-8") as file:
        return file.read()


def analyze_video(video_path, language):
    # Upload the video, wait for it to become ACTIVE, then send the prompt
    # together with the video to the model.
    try:
        uploaded_file = upload_to_gemini(video_path, mime_type="video/mp4")
        wait_for_files_active([uploaded_file])
        model = genai.GenerativeModel(model_name="gemini-1.5-flash")
        prompt = get_prompt(language)
        chat_session = model.start_chat(
            history=[
                {
                    "role": "user",
                    "parts": [
                        uploaded_file,
                    ],
                },
            ]
        )
        print("Sending prompt to chat session...")
        response = chat_session.send_message(prompt)
        print("Received response from chat session.")
        print(response.text)
        return response.text
    except Exception as e:
        print(f"Error in analyze_video: {str(e)}")
        raise
Even with this straightforward workflow, the processing time is much longer than I expected. Is there a way to optimize the file upload and processing steps, or otherwise improve the overall performance of this code? Any suggestions or insights would be greatly appreciated.
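For what it's worth, here is a rough sketch of how I'm planning to time each stage to see where the ~1.3 minutes actually goes. The analyze_video_timed wrapper and the time.perf_counter calls are my own instrumentation, not part of the workflow above; it reuses the helper functions from my code.

    import time

    def analyze_video_timed(video_path, language):
        # Wrap the existing helpers with rough timings to see which stage dominates:
        # the upload, the PROCESSING poll loop, or the model call itself.
        t0 = time.perf_counter()
        uploaded_file = upload_to_gemini(video_path, mime_type="video/mp4")
        t1 = time.perf_counter()
        print(f"Upload took {t1 - t0:.1f}s")

        wait_for_files_active([uploaded_file])
        t2 = time.perf_counter()
        print(f"File processing (polling) took {t2 - t1:.1f}s")

        model = genai.GenerativeModel(model_name="gemini-1.5-flash")
        chat_session = model.start_chat(history=[{"role": "user", "parts": [uploaded_file]}])
        response = chat_session.send_message(get_prompt(language))
        t3 = time.perf_counter()
        print(f"Model call took {t3 - t2:.1f}s")

        return response.text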