houcha committed on
Commit
6741bb5
·
verified ·
1 Parent(s): 28875c7

Remove @override for 3.11 compatibility

Browse files
Files changed (1) hide show
  1. pipeline.py +0 -6
pipeline.py CHANGED
@@ -1,5 +1,3 @@
1
- from typing import override
2
-
3
  from transformers import Pipeline
4
 
5
  from src.lemmatize_helper import reconstruct_lemma
@@ -17,7 +15,6 @@ class ConlluTokenClassificationPipeline(Pipeline):
17
  self.tokenizer = tokenizer
18
  self.sentenizer = sentenizer
19
 
20
- @override
21
  def _sanitize_parameters(self, output_format: str = 'list', **kwargs):
22
  if output_format not in ['list', 'str']:
23
  raise ValueError(
@@ -26,7 +23,6 @@ class ConlluTokenClassificationPipeline(Pipeline):
26
  # capture output_format for postprocessing
27
  return {}, {}, {'output_format': output_format}
28
 
29
- @override
30
  def preprocess(self, inputs: str) -> dict:
31
  if not isinstance(inputs, str):
32
  raise ValueError("pipeline input must be string (text)")
@@ -40,11 +36,9 @@ class ConlluTokenClassificationPipeline(Pipeline):
40
  self._texts = sentences
41
  return {"words": words}
42
 
43
- @override
44
  def _forward(self, model_inputs: dict) -> dict:
45
  return self.model(**model_inputs, inference_mode=True)
46
 
47
- @override
48
  def postprocess(self, model_outputs: dict, output_format: str) -> list[dict] | str:
49
  sentences = self._decode_model_output(model_outputs)
50
  # Format sentences into CoNLL-U string if requested.
 
 
 
1
  from transformers import Pipeline
2
 
3
  from src.lemmatize_helper import reconstruct_lemma
 
15
  self.tokenizer = tokenizer
16
  self.sentenizer = sentenizer
17
 
 
18
  def _sanitize_parameters(self, output_format: str = 'list', **kwargs):
19
  if output_format not in ['list', 'str']:
20
  raise ValueError(
 
23
  # capture output_format for postprocessing
24
  return {}, {}, {'output_format': output_format}
25
 
 
26
  def preprocess(self, inputs: str) -> dict:
27
  if not isinstance(inputs, str):
28
  raise ValueError("pipeline input must be string (text)")
 
36
  self._texts = sentences
37
  return {"words": words}
38
 
 
39
  def _forward(self, model_inputs: dict) -> dict:
40
  return self.model(**model_inputs, inference_mode=True)
41
 
 
42
  def postprocess(self, model_outputs: dict, output_format: str) -> list[dict] | str:
43
  sentences = self._decode_model_output(model_outputs)
44
  # Format sentences into CoNLL-U string if requested.