import torch
import gradio as gr
from torchaudio.sox_effects import apply_effects_file
from transformers import AutoFeatureExtractor, AutoModelForAudioXVector

device = "cuda" if torch.cuda.is_available() else "cpu"
EFFECTS = [
    ["remix", "-"],  # merge all channels into one
    ["channels", "1"],  # downmix to mono
    ["rate", "16000"],  # resample to 16,000 Hz
    ["gain", "-1.0"],  # attenuate by 1 dB
    ["silence", "1", "0.1", "0.1%", "-1", "0.1", "0.1%"],  # trim leading and trailing silence
    # ["pad", "0", "1.5"],  # pad 1.5 seconds of silence at the end
    ["trim", "0", "10"],  # keep only the first 10 seconds
]
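
# Minimal sanity check of the effects chain (a sketch, assuming a local file
# "sample.wav" exists): the chain yields a mono 16 kHz waveform of at most
# 10 s, so the returned tensor has shape (1, n) with n <= 160000.
# wav, sr = apply_effects_file("sample.wav", EFFECTS)
# assert sr == 16000 and wav.shape[0] == 1 and wav.shape[1] <= 160000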

model_name = "microsoft/unispeech-sat-base-plus-sv"
feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
model = AutoModelForAudioXVector.from_pretrained(model_name).to(device)

# Similarity threshold for accepting a speaker match
THRESHOLD = 0.85

cosine_similarity = torch.nn.CosineSimilarity(dim=-1)
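
# CosineSimilarity(dim=-1) scores pairs of embeddings along the last axis;
# identical vectors score 1.0. Quick illustration (512 is assumed here to
# match this model's x-vector size):
#   v = torch.randn(1, 512)
#   assert cosine_similarity(v, v).item() > 0.999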

def similarity_fn(path1, path2):
    if not (path1 and path2):
        return "ERROR: Please record audio for *both* speakers!"
    # Apply the SoX effects chain to both input files
    wav1, _ = apply_effects_file(path1, EFFECTS)
    wav2, _ = apply_effects_file(path2, EFFECTS)
    # Extract model input features
    input1 = feature_extractor(wav1.squeeze(0), return_tensors="pt", sampling_rate=16000).input_values.to(device)
    input2 = feature_extractor(wav2.squeeze(0), return_tensors="pt", sampling_rate=16000).input_values.to(device)
    # Compute speaker embeddings (x-vectors)
    with torch.no_grad():
        emb1 = model(input1).embeddings
        emb2 = model(input2).embeddings
    emb1 = torch.nn.functional.normalize(emb1, dim=-1)
    emb2 = torch.nn.functional.normalize(emb2, dim=-1)
    # .item() works regardless of device, unlike .numpy() on a CUDA tensor
    similarity = cosine_similarity(emb1, emb2).item()
    if similarity >= THRESHOLD:
        return f"Similarity score is {similarity:.0%}. Audio belongs to the same person."
    return f"Similarity score is {similarity:.0%}. Audio doesn't belong to the same person. Authentication failed!"
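
# Optional command-line sanity check (a sketch; assumes the example files
# listed in `examples` below sit next to this script):
# print(similarity_fn("cate_blanch.mp3", "cate_blanch_2.mp3"))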

inputs = [
    gr.inputs.Audio(source="microphone", type="filepath", optional=True, label="Speaker #1"),
    gr.inputs.Audio(source="microphone", type="filepath", optional=True, label="Speaker #2"),
]

outputs = gr.outputs.Textbox(label="Output Text")
description = (
    "This app checks whether two speech recordings belong to the same speaker, "
    "based on the cosine similarity of their speaker embeddings."
)

interface = gr.Interface(
    fn=similarity_fn,
    inputs=inputs,
    outputs=outputs,
    title="Voice Authentication with UniSpeech-SAT + X-Vectors",
    description=description,
    layout="horizontal",
    theme="grass",
    allow_flagging=False,
    live=False,
    examples=[
        ["cate_blanch.mp3", "cate_blanch_2.mp3"],
        ["cate_blanch.mp3", "denzel_washington.mp3"]
    ]
)

interface.launch(enable_queue=True)