text
stringlengths 0
4.99k
|
---|
) |
) |
Found 6 files belonging to 2 directories |
Resample all noise samples to 16000 Hz |
# Resample every noise WAV under DATASET_NOISE_PATH to 16000 Hz in place.
# NOTE(review): requires `ffprobe` and `ffmpeg` on PATH, and runs through a
# shell via os.system — DATASET_NOISE_PATH is assumed to be a trusted,
# constant path with no shell metacharacters; confirm before reuse.
command = (
    f"for dir in `ls -1 {DATASET_NOISE_PATH}`; do "
    f"for file in `ls -1 {DATASET_NOISE_PATH}/$dir/*.wav`; do "
    "sample_rate=`ffprobe -hide_banner -loglevel panic -show_streams "
    "$file | grep sample_rate | cut -f2 -d=`; "
    "if [ $sample_rate -ne 16000 ]; then "
    "ffmpeg -hide_banner -loglevel panic -y "
    "-i $file -ar 16000 temp.wav; "
    "mv temp.wav $file; "
    "fi; done; done"
)
os.system(command)
# Split noise into chunks of 16000 each
def load_noise_sample(path):
    """Decode the mono WAV at *path* and cut it into 1-second slices.

    Returns a list of tensors of SAMPLING_RATE samples each, or None when
    the file's sampling rate does not match SAMPLING_RATE.
    """
    wav, rate = tf.audio.decode_wav(tf.io.read_file(path), desired_channels=1)
    if rate != SAMPLING_RATE:
        print("Sampling rate for {} is incorrect. Ignoring it".format(path))
        return None
    # Keep only whole SAMPLING_RATE-sized slices; any trailing remainder
    # shorter than one second is dropped.
    n_slices = int(wav.shape[0] / SAMPLING_RATE)
    return tf.split(wav[: n_slices * SAMPLING_RATE], n_slices)
# Collect every valid 1-second slice from every noise file into one tensor.
noises = []
for path in noise_paths:
    slices = load_noise_sample(path)
    # load_noise_sample returns None (or an empty list) for unusable files.
    if slices:
        noises.extend(slices)
noises = tf.stack(noises)

print(
    "{} noise files were split into {} noise samples where each is {} sec. long".format(
        len(noise_paths), noises.shape[0], noises.shape[1] // SAMPLING_RATE
    )
)
6 noise files were split into 354 noise samples where each is 1 sec. long |
Dataset generation |
def paths_and_labels_to_dataset(audio_paths, labels):
    """Constructs a dataset of audios and labels."""
    # Decode each path into its audio tensor, then pair element-wise with
    # the corresponding label.
    audio_ds = tf.data.Dataset.from_tensor_slices(audio_paths).map(
        lambda p: path_to_audio(p)
    )
    label_ds = tf.data.Dataset.from_tensor_slices(labels)
    return tf.data.Dataset.zip((audio_ds, label_ds))
def path_to_audio(path):
    """Reads and decodes an audio file."""
    raw = tf.io.read_file(path)
    # Single channel, resampling hint of SAMPLING_RATE frames.
    decoded, _ = tf.audio.decode_wav(raw, 1, SAMPLING_RATE)
    return decoded
def add_noise(audio, noises=None, scale=0.5):
    """Mix one randomly chosen noise slice into every sample of *audio*.

    When *noises* is None the input is returned unchanged.  *scale*
    attenuates the noise after it has been matched to the audio's peak
    amplitude.
    """
    if noises is None:
        return audio
    # Draw one random noise index per batch element.
    idx = tf.random.uniform(
        (tf.shape(audio)[0],), 0, noises.shape[0], dtype=tf.int32
    )
    picked = tf.gather(noises, idx, axis=0)
    # Ratio of audio peak to noise peak, broadcast along the time axis,
    # so the noise is rescaled to the audio's amplitude before mixing.
    ratio = tf.math.reduce_max(audio, axis=1) / tf.math.reduce_max(picked, axis=1)
    ratio = tf.repeat(tf.expand_dims(ratio, axis=1), tf.shape(audio)[1], axis=1)
    return audio + picked * ratio * scale
def audio_to_fft(audio):
    """Return magnitudes of the positive-frequency half of the FFT.

    tf.signal.fft transforms the innermost axis, so the trailing channel
    dimension is squeezed away first and restored afterwards.
    """
    squeezed = tf.squeeze(audio, axis=-1)
    spectrum = tf.signal.fft(
        tf.cast(tf.complex(real=squeezed, imag=tf.zeros_like(squeezed)), tf.complex64)
    )
    spectrum = tf.expand_dims(spectrum, axis=-1)
    # The FFT of a real signal is symmetric: the first half holds all the
    # positive-frequency information.
    return tf.math.abs(spectrum[:, : (squeezed.shape[1] // 2), :])
# Get the list of audio file paths along with their corresponding labels.
# NOTE(review): each entry of DATASET_AUDIO_PATH is presumably one
# speaker/class subdirectory — confirm against the dataset layout.
# os.listdir order (unsorted) is kept deliberately to match the original.
class_names = os.listdir(DATASET_AUDIO_PATH)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.