I have a lot of audio recordings of lectures in which I say the same thing multiple times; mostly these are incomplete statements like:
“this is the part” (and then retrying)
“this is the part where” (and then retrying)
“this is the part where we will explore the theory”
Visually, in any audio or video editor, I can spot these retakes because the waveforms look very similar, i.e. the same pattern of high and low points.
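For what it's worth, this is roughly how I picture quantifying that similarity. A minimal sketch (not part of my script below), where chunk_a and chunk_b are two hypothetical equal-length NumPy slices of the signal:

import numpy as np

def chunk_similarity(chunk_a, chunk_b):
    # Cosine similarity between two equal-length chunks: values near 1.0
    # mean the chunks have (to my eye) the same waveform shape
    denom = np.linalg.norm(chunk_a) * np.linalg.norm(chunk_b)
    if denom == 0:  # guard against silent chunks
        return 0.0
    return float(np.dot(chunk_a, chunk_b) / denom)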
I am trying to do the same thing in Python: keep only the last retake and silence the earlier ones, so the total audio length doesn't change (there is a small sketch of what I mean by "silencing" after the code below). Right now my approach looks like this, but it gives me back the exact same audio as the input.
import librosa
import numpy as np
import soundfile as sf
import os
# Load the audio file
file_path = r'C:\test.wav'
y, sr = librosa.load(file_path, sr=None)
# Parameters
chunk_duration = 1 # seconds
overlap = 0.75 # 75% overlap for better similarity matching
chunk_length = int(chunk_duration * sr)
step_length = int(chunk_length * (1 - overlap))
# Index (in samples) up to the end of the last handled chunk
last_occurrence_end = 0
# Create a list to store the final audio chunks
final_audio = []
# Loop over the audio in chunks
i = 0
while i < len(y) - chunk_length:
    chunk = y[i:i + chunk_length]
    next_chunk_start = i + step_length
    # If this is the last chunk, just keep it
    if next_chunk_start + chunk_length > len(y):
        last_occurrence_end = len(y)
        break
    next_chunk = y[next_chunk_start:next_chunk_start + chunk_length]
    # Compute simple Euclidean distance between the two chunks
    distance = np.linalg.norm(chunk - next_chunk)
    # Set a threshold for similarity
    if distance > 1000:  # Adjust this threshold as needed
        if i > last_occurrence_end:
            final_audio.append(y[last_occurrence_end:i])
        last_occurrence_end = i + chunk_length
    i = next_chunk_start
# Append the last segment after the loop
if last_occurrence_end < len(y):
    final_audio.append(y[last_occurrence_end:])
# Check if any chunks were added to final_audio
if final_audio:
    # Concatenate all the kept chunks to form the final trimmed audio
    final_audio = np.concatenate(final_audio)
else:
    # If no chunks were kept, return the original audio
    final_audio = y
# Define the new file path with "_clean" suffix
new_file_path = os.path.splitext(file_path)[0] + '_clean.wav'
# Export the trimmed audio
sf.write(new_file_path, final_audio, sr)
print(f"Cleaned audio saved as: {new_file_path}")