I need to record audio in raw PCM format on a Raspberry Pi whenever it detects a sound that passes a certain threshold, and stop when it goes silent. I came across a previously asked question and followed the top answer in Detect & Record Audio in Python. However, that code saves the file in WAV format, and the API I am using only accepts pcm, speex, or speex-wb.
How can I record the audio in PCM format while keeping the behaviour of only recording when a sound is detected? Here is the code I am currently using:
from sys import byteorder
from array import array
from struct import pack

import pyaudio
import wave

THRESHOLD = 8000
chans = 1
chunk = 4096
form_1 = pyaudio.paInt16
samp_rate = 44100
dev_index = 2  # device index found by p.get_device_info_by_index(ii)
wav_output_filename = 'test4.wav'

def is_silent(snd_data):
    "Returns 'True' if below the 'silent' threshold"
    return max(snd_data) < THRESHOLD

def normalize(snd_data):
    "Average the volume out"
    MAXIMUM = 16384
    times = float(MAXIMUM) / max(abs(i) for i in snd_data)

    r = array('h')
    for i in snd_data:
        r.append(int(i * times))
    return r

def trim(snd_data):
    "Trim the blank spots at the start and end"
    def _trim(snd_data):
        snd_started = False
        r = array('h')

        for i in snd_data:
            if not snd_started and abs(i) > THRESHOLD:
                snd_started = True
                r.append(i)
            elif snd_started:
                r.append(i)
        return r

    # Trim to the left
    snd_data = _trim(snd_data)

    # Trim to the right
    snd_data.reverse()
    snd_data = _trim(snd_data)
    snd_data.reverse()

    return snd_data

def add_silence(snd_data, seconds):
    "Add silence to the start and end of 'snd_data' of length 'seconds' (float)"
    silence = [0] * int(seconds * samp_rate)
    r = array('h', silence)
    r.extend(snd_data)
    r.extend(silence)
    return r

def record():
    """
    Record a word or words from the microphone and
    return the data as an array of signed shorts.

    Normalizes the audio, trims silence from the
    start and end, and pads with 0.5 seconds of
    blank sound to make sure VLC et al can play
    it without getting chopped off.
    """
    audio = pyaudio.PyAudio()
    stream = audio.open(format=form_1, channels=chans, rate=samp_rate,
                        input_device_index=dev_index, input=True,  # output=True,
                        frames_per_buffer=chunk)

    num_silent = 0
    snd_started = False

    r = array('h')

    while 1:
        # little endian, signed short
        snd_data = array('h', stream.read(chunk, exception_on_overflow=False))
        if byteorder == 'big':
            snd_data.byteswap()
        r.extend(snd_data)

        silent = is_silent(snd_data)

        if silent and snd_started:
            num_silent += 1
        elif not silent and not snd_started:
            snd_started = True

        if snd_started and num_silent > 30:
            break

    sample_width = audio.get_sample_size(form_1)
    stream.stop_stream()
    stream.close()
    audio.terminate()

    r = normalize(r)
    r = trim(r)
    r = add_silence(r, 0.5)
    return sample_width, r

def record_to_file(path):
    "Records from the microphone and outputs the resulting data to 'path'"
    sample_width, data = record()
    data = pack('<' + ('h' * len(data)), *data)

    wf = wave.open(path, 'wb')
    wf.setnchannels(chans)
    wf.setsampwidth(sample_width)
    wf.setframerate(samp_rate)
    wf.writeframes(data)
    wf.close()

if __name__ == '__main__':
    print("please speak a word into the microphone")
    record_to_file(wav_output_filename)
    print("done - result written to " + wav_output_filename)