meg (HF Staff) committed
Commit c334e65 · verified · 1 Parent(s): aad4cd6

Modularizing, documenting, and adding LLM-generation support.

Files changed (1): src/process.py (+87, -0)
src/process.py ADDED
@@ -0,0 +1,87 @@
+ import difflib
+ import re
+ from functools import lru_cache
+
+ import torch
+ from transformers import Pipeline, pipeline
+
+
+ # ------------------- Utilities -------------------
+ def normalize_text(t: str, lower: bool = True) -> str:
+     """Normalize LLM-generated and human-generated strings.
+     For LLM output, this removes extraneous quote marks and whitespace."""
+     # English-only normalization: lowercase; keep letters, digits, and ' - . ,
+     if lower:
+         t = t.lower()
+     # TODO: Previously was re.sub(r"[^a-z0-9'\-]+", " ", t); discuss normalizing for LLMs too.
+     t = re.sub(r"[^a-zA-Z0-9'\-.,]+", " ", t)
+     t = re.sub(r"\s+", " ", t).strip()
+     return t
+
+
+ @lru_cache(maxsize=2)
+ def get_asr_pipeline(model_id: str, device_preference: str) -> Pipeline:
+     """Build and cache an ASR pipeline.
+     Parameters:
+         model_id: String ID of the desired ASR model.
+         device_preference: Desired device for ASR processing: "cuda", "cpu", or "auto".
+     Returns:
+         transformers ASR pipeline component.
+     """
+     if device_preference == "cuda" and torch.cuda.is_available():
+         device = 0
+     elif device_preference == "auto":
+         device = 0 if torch.cuda.is_available() else -1
+     else:
+         device = -1
+     return pipeline(
+         "automatic-speech-recognition",
+         model=model_id,  # use English-only Whisper models (.en)
+         device=device,
+         chunk_length_s=30,
+         return_timestamps=False,
+     )
+
+
+ def run_asr(audio_path: str, model_id: str, device_pref: str) -> str | Exception:
+     """Return the recognized user utterance from the input audio.
+     Parameters:
+         audio_path: Filepath to the recorded audio, as produced by a
+             gradio.Audio component with type="filepath".
+         model_id: String ID of the desired ASR model.
+         device_pref: Desired device for ASR processing: "cuda", "cpu", or "auto".
+     Returns:
+         hyp_raw: Recognized user utterance, or the exception raised by the pipeline.
+     """
+     asr = get_asr_pipeline(model_id, device_pref)
+     try:
+         # IMPORTANT: For English-only Whisper (.en), do NOT pass language/task args.
+         result = asr(audio_path)
+         hyp_raw = result["text"].strip()
+     except Exception as e:
+         return e
+     return hyp_raw
+
+
+ def similarity_and_diff(
+     ref_tokens: list[str], hyp_tokens: list[str]
+ ) -> tuple[float, list[tuple[str, int, int, int, int]]]:
+     """Compare reference and hypothesis token sequences.
+     Returns:
+         ratio: Similarity ratio (0..1).
+         opcodes: List of (tag, i1, i2, j1, j2) differences between the target
+             and the recognized user utterance.
+     """
+     sm = difflib.SequenceMatcher(a=ref_tokens, b=hyp_tokens)
+     ratio = sm.ratio()
+     opcodes = sm.get_opcodes()
+     return ratio, opcodes
+
+
+ class SentenceMatcher:
+     """Class for keeping track of (target sentence, user utterance) match features."""
+     def __init__(self, target_sentence: str, user_transcript: str, pass_threshold: float):
+         self.target_sentence: str = target_sentence
+         self.user_transcript: str = user_transcript
+         self.pass_threshold: float = pass_threshold
+         self.target_tokens: list[str] = normalize_text(target_sentence).split()
+         self.user_tokens: list[str] = normalize_text(user_transcript).split()
+         self.ratio: float
+         self.alignments: list[tuple[str, int, int, int, int]]
+         self.ratio, self.alignments = similarity_and_diff(self.target_tokens,
+                                                           self.user_tokens)
+         self.passed: bool = self.ratio >= self.pass_threshold
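
For context, a minimal usage sketch of how these utilities compose (not part of the commit); the sentences, the 0.7 threshold, and importing the file as `process` are illustrative assumptions:

# Illustrative usage (assumes src/process.py is importable as `process`).
from process import SentenceMatcher

target = "The quick brown fox jumps over the lazy dog."
heard = "the quick brown fox jumped over the lazy dog"

matcher = SentenceMatcher(target, heard, pass_threshold=0.7)
print(f"{matcher.ratio:.2f}")  # 0.78: 7 of the 9 tokens on each side align
print(matcher.passed)          # True at this threshold
# Opcodes are (tag, i1, i2, j1, j2) spans over the two token lists.
for tag, i1, i2, j1, j2 in matcher.alignments:
    if tag != "equal":
        print(tag, matcher.target_tokens[i1:i2], "->", matcher.user_tokens[j1:j2])
# replace ['jumps'] -> ['jumped']
# replace ['dog.'] -> ['dog']

Note that normalize_text keeps '.' and ',', so "dog." vs. "dog" counts as a token replacement here alongside "jumps" vs. "jumped"; this follows from the regex in the TODO discussion above.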
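And a sketch of how run_asr and SentenceMatcher might be wired into a Gradio app; the model id, target sentence, and UI layout are assumptions for illustration, not part of this commit. It assumes a recent Gradio and gr.Audio(type="filepath"), so run_asr receives a plain filepath string:

# Hypothetical Gradio wiring (illustrative, not in this commit).
import gradio as gr

from process import SentenceMatcher, run_asr

TARGET = "She sells sea shells by the sea shore."  # illustrative target sentence

def check_pronunciation(audio_path: str) -> str:
    hyp = run_asr(audio_path, model_id="openai/whisper-small.en", device_pref="auto")
    if isinstance(hyp, Exception):
        return f"ASR failed: {hyp}"
    match = SentenceMatcher(TARGET, hyp, pass_threshold=0.8)
    verdict = "passed" if match.passed else "failed"
    return f"Heard: {hyp!r} (similarity {match.ratio:.2f}, {verdict})"

demo = gr.Interface(
    fn=check_pronunciation,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs="text",
)

if __name__ == "__main__":
    demo.launch()

Because run_asr returns the exception instead of raising it, the callback can surface ASR failures in the UI rather than crashing, at the cost of the isinstance check shown above.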