|
|
|
|
|
|
|
|
import pyaudio
|
|
|
|
|
import wave
|
|
|
|
|
import argparse
|
|
|
|
|
import os
|
|
|
|
|
import datetime
|
|
|
|
|
from pydub import AudioSegment
|
|
|
|
|
from pydub.silence import detect_nonsilent
|
|
|
|
|
|
|
|
|
|
# Parameters for the audio stream
FORMAT = pyaudio.paInt16            # 16-bit signed integer samples
CHANNELS = 2                        # stereo capture
RATE = 48000                        # sample rate in Hz
CHUNK = 1024                        # frames per stream.read() call
SILENCE_THRESHOLD = -50             # silence threshold in dBFS (adjust for noise level)
SILENCE_DURATION = 1250             # n milliseconds of silence to stop recording

# Shared PyAudio instance; terminated in the __main__ finally block.
audio = pyaudio.PyAudio()

# Create the recordings directory if it doesn't exist.
# exist_ok=True avoids the check-then-create race of os.path.exists + makedirs.
RECORDINGS_DIR = './recordings'
os.makedirs(RECORDINGS_DIR, exist_ok=True)
|
|
|
|
|
|
|
|
|
|
def get_filename():
    """Build a timestamped .wav path inside the recordings directory.

    Returns:
        str: e.g. './recordings/2024-01-31_14-05-09.wav'.
    """
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    return os.path.join(RECORDINGS_DIR, timestamp + ".wav")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def save_wave_file(filename, audio_segment):
    """Save the given audio segment to a WAV file in the recordings directory.

    Args:
        filename: Destination path for the WAV file.
        audio_segment: pydub.AudioSegment to export.
    """
    audio_segment.export(filename, format="wav")
    # Bug fix: the f-string previously contained no placeholder, so the
    # saved path was never shown; interpolate the actual filename.
    print(f"Saved recording: {filename}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def detect_nonsilent_chunk(sound, silence_thresh=SILENCE_THRESHOLD, silence_len=SILENCE_DURATION):
    """Return the nonsilent [start_ms, end_ms] ranges found in *sound*.

    Args:
        sound: pydub.AudioSegment to scan.
        silence_thresh: level (dBFS) below which audio counts as silence.
        silence_len: minimum silence length (ms) separating nonsilent chunks.

    Returns:
        List of [start, end] millisecond pairs; empty if all silent.
    """
    detection_kwargs = {
        "min_silence_len": silence_len,
        "silence_thresh": silence_thresh,
    }
    return detect_nonsilent(sound, **detection_kwargs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def record_transmissions(device_id: int):
    """Listen on the given input device and save each detected transmission.

    Runs forever (until KeyboardInterrupt): buffers audio from the device,
    starts a "recording" when nonsilent audio appears, and once the buffer
    ends with at least SILENCE_DURATION ms of silence, saves the transmission
    (trimmed of leading/trailing silence) to a timestamped WAV file.

    Args:
        device_id: PyAudio input device index to capture from.

    Bug fixes vs. the original:
      * The save code was guarded by ``if len(nonsilent_chunks) > 0`` inside a
        branch only reachable when ``nonsilent_chunks`` was falsy, so no
        recording was ever written. End-of-transmission is now detected from
        the trailing-silence length instead.
      * ``frames`` was reset at detection time, discarding the very audio that
        triggered detection; the buffer is now kept and trimmed at save time.
      * The stream is now closed in a ``finally`` block.
      * The idle buffer is bounded so long silent periods don't grow memory.
    """
    stream = audio.open(format=FORMAT, channels=CHANNELS,
                        rate=RATE, input=True,
                        frames_per_buffer=CHUNK, input_device_index=device_id)

    frames = []
    recording = False
    # Frames worth of audio to retain while idle (enough context to still
    # capture the start of a transmission, at least SILENCE_DURATION ms).
    max_idle_frames = int(RATE / CHUNK * (SILENCE_DURATION / 1000.0)) + 1

    print("Listening for transmissions...")

    try:
        while True:
            data = stream.read(CHUNK)
            frames.append(data)

            # Convert current audio buffer to AudioSegment for processing.
            sound = AudioSegment(b''.join(frames),
                                 sample_width=audio.get_sample_size(FORMAT),
                                 frame_rate=RATE, channels=CHANNELS)

            # Detect if there's sound (nonsilent chunks, in ms offsets).
            nonsilent_chunks = detect_nonsilent_chunk(sound)

            if not recording:
                if nonsilent_chunks:
                    print("Transmission detected, starting recording...")
                    recording = True
                elif len(frames) > max_idle_frames:
                    # Idle: keep only a short trailing window so memory
                    # stays bounded during long silences.
                    del frames[:-max_idle_frames]
                continue

            # Recording: the transmission ends once the buffer's tail is
            # silent for at least SILENCE_DURATION ms.
            if nonsilent_chunks:
                trailing_silence = len(sound) - nonsilent_chunks[-1][1]
            else:
                trailing_silence = len(sound)

            if trailing_silence >= SILENCE_DURATION:
                if nonsilent_chunks:
                    # Save recording without leading/trailing silence.
                    trimmed_audio = sound[nonsilent_chunks[0][0]:nonsilent_chunks[-1][1]]
                    save_wave_file(get_filename(), trimmed_audio)
                recording = False
                frames.clear()  # Prepare for the next transmission.
                print("Recording stopped, waiting for the next transmission...")
    finally:
        # Always release the input stream, even on error or interrupt.
        stream.stop_stream()
        stream.close()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def parse_arguments():
    """Parse the command line: one required integer audio device ID.

    Returns:
        argparse.Namespace with a ``device_id`` attribute.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "device_id",
        type=int,
        help="The ID of the audio device to use",
    )
    return arg_parser.parse_args()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    try:
        args = parse_arguments()
        # Bug fix: print() has no logging-style %s substitution, so the old
        # print("Arguments: %s", args) emitted a literal "%s"; use an f-string.
        print(f"Arguments: {args}")
        record_transmissions(args.device_id)
    except KeyboardInterrupt:
        print("Stopping...")
    finally:
        # Release PortAudio resources even on error or interrupt.
        audio.terminate()
|