Evan-Lin commited on
Commit
b924d93
·
1 Parent(s): b87e6ce

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +10 -8
handler.py CHANGED
@@ -28,7 +28,8 @@ class EndpointHandler():
28
  processor = WhisperProcessor.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task)
29
  feature_extractor = processor.feature_extractor
30
  self.forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task=task)
31
- self.pipeline = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor)
 
32
 
33
  def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
34
  """
@@ -42,11 +43,12 @@ class EndpointHandler():
42
 
43
  # run normal prediction
44
  inputs = data.pop("inputs", data)
45
- print(inputs)
46
- print(inputs, file=sys.stderr)
47
- print(inputs, file=sys.stdout)
48
- prediction = self.pipeline(inputs, generate_kwargs={"forced_decoder_ids": self.forced_decoder_ids}, max_new_tokens=255)["text"]
49
- print(prediction)
50
- print(predcition, file=sys.stderr)
51
- print(predcition, file=sys.stdout)
 
52
  return prediction
 
28
  processor = WhisperProcessor.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task)
29
  feature_extractor = processor.feature_extractor
30
  self.forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task=task)
31
+ # self.pipeline = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor)
32
+ self.pipeline = pipeline(task= "automatic-speech-recognition", model=path, tokenizer=tokenizer, feature_extractor = feature_extractor)
33
 
34
  def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
35
  """
 
43
 
44
  # run normal prediction
45
  inputs = data.pop("inputs", data)
46
+ print("a1", inputs)
47
+ print("a2", inputs, file=sys.stderr)
48
+ print("a3", inputs, file=sys.stdout)
49
+ # prediction = self.pipeline(inputs, generate_kwargs={"forced_decoder_ids": self.forced_decoder_ids}, max_new_tokens=255)["text"]
50
+ prediction = self.pipeline(inputs, return_timestamps=False)
51
+ print("b1", prediction)
52
+ print("b2", prediction, file=sys.stderr)
53
+ print("b3", prediction, file=sys.stdout)
54
  return prediction