Xin Zhang committed on
Commit
fdeedee
·
1 Parent(s): ce0e589

[fix]: typo

Browse files
Files changed (1) hide show
  1. transcribe/strategy.py +30 -30
transcribe/strategy.py CHANGED
@@ -34,17 +34,17 @@ class TranscriptResult:
34
  class TranscriptToken:
35
  """表示一个转录片段,包含文本和时间信息"""
36
  text: str # 转录的文本内容
37
- t0: float # 开始时间(百分之一秒)
38
- t1: float # 结束时间(百分之一秒)
39
 
40
  def is_punctuation(self):
41
  """检查文本是否包含标点符号"""
42
  return REGEX_MARKERS.search(self.text.strip()) is not None
43
-
44
  def is_end(self):
45
  """检查文本是否为句子结束标记"""
46
  return SENTENCE_END_PATTERN.search(self.text.strip()) is not None
47
-
48
  def is_pause(self):
49
  """检查文本是否为暂停标记"""
50
  return PAUSEE_END_PATTERN.search(self.text.strip()) is not None
@@ -86,13 +86,13 @@ class TranscriptChunk:
86
  if not ck.only_punctuation()
87
  ]
88
 
89
-
90
  def get_split_first_rest(self, mode: SplitMode):
91
  chunks = self.split_by(mode)
92
  fisrt_chunk = chunks[0] if chunks else self
93
  rest_chunks = chunks[1:] if chunks else None
94
  return fisrt_chunk, rest_chunks
95
-
96
  def puncation_numbers(self) -> int:
97
  """计算片段中标点符号的数量"""
98
  return sum(1 for seg in self.items if seg.is_punctuation())
@@ -104,35 +104,35 @@ class TranscriptChunk:
104
  def join(self) -> str:
105
  """将片段连接为一个字符串"""
106
  return self.separator.join(seg.text for seg in self.items)
107
-
108
  def compare(self, chunk: Optional['TranscriptChunk'] = None) -> float:
109
  """比较当前片段与另一个片段的相似度"""
110
  if not chunk:
111
  return 0
112
-
113
  score = self._calculate_similarity(self.join(), chunk.join())
114
  logger.debug(f"Compare: {self.join()} vs {chunk.join()} : {score}")
115
  return score
116
-
117
  def only_punctuation(self)->bool:
118
  return all(seg.is_punctuation() for seg in self.items)
119
-
120
  def has_punctuation(self) -> bool:
121
  return any(seg.is_punctuation() for seg in self.items)
122
-
123
  def get_buffer_index(self) -> int:
124
  return self.items[-1].buffer_index()
125
-
126
  def is_end_sentence(self) ->bool:
127
  return self.items[-1].is_end()
128
-
129
 
130
  class TranscriptHistory:
131
  """管理转录片段的历史记录"""
132
 
133
  def __init__(self) -> None:
134
  self.history = collections.deque(maxlen=2) # 存储最近的两个片段
135
-
136
  def add(self, chunk: TranscriptChunk):
137
  """添加新的片段到历史记录"""
138
  self.history.appendleft(chunk)
@@ -144,7 +144,7 @@ class TranscriptHistory:
144
  def lastest_chunk(self):
145
  """获取最后一个片段"""
146
  return self.history[-1]
147
-
148
  def clear(self):
149
  self.history.clear()
150
 
@@ -168,7 +168,7 @@ class TranscriptBuffer:
168
 
169
  def get_seg_id(self) -> int:
170
  return self._current_seg_id
171
-
172
  @property
173
  def current_sentences_length(self) -> int:
174
  count = 0
@@ -178,7 +178,7 @@ class TranscriptBuffer:
178
  else:
179
  count += len(item)
180
  return count
181
-
182
  def update_pending_text(self, text: str) -> None:
183
  """更新临时缓冲字符串"""
184
  self._buffer = text
@@ -192,11 +192,11 @@ class TranscriptBuffer:
192
  def commit_paragraph(self) -> None:
193
  """
194
  提交当前短句为完整段落(如句子结束)
195
-
196
  Args:
197
  end_of_sentence: 是否为句子结尾(如检测到句号)
198
  """
199
-
200
  count = 0
201
  current_sentences = []
202
  while len(self._sentences): # and count < 20:
@@ -219,13 +219,13 @@ class TranscriptBuffer:
219
  output = self.split_and_join(
220
  text.replace(
221
  self._separator, ""))
222
-
223
  logger.debug("==== rebuild string ====")
224
  logger.debug(text)
225
  logger.debug(output)
226
 
227
  return output
228
-
229
  @staticmethod
230
  def split_and_join(text):
231
  tokens = []
@@ -264,7 +264,7 @@ class TranscriptBuffer:
264
  for stable_str in stable_strings:
265
  self.update_pending_text(stable_str)
266
  self.commit_line()
267
-
268
  current_text_len = len(self.current_not_commit_text.split(self._separator)) if self._separator else len(self.current_not_commit_text)
269
  # current_text_len = len(self.current_not_commit_text.split(self._separator))
270
  self.update_pending_text(remaining_string)
@@ -279,7 +279,7 @@ class TranscriptBuffer:
279
  self.update_pending_text(remaining_string)
280
  return False
281
 
282
-
283
  @property
284
  def un_commit_paragraph(self) -> str:
285
  """当前短句组合"""
@@ -298,7 +298,7 @@ class TranscriptBuffer:
298
  @property
299
  def current_not_commit_text(self) -> str:
300
  return self.un_commit_paragraph + self.pending_text
301
-
302
 
303
 
304
  class TranscriptStabilityAnalyzer:
@@ -311,8 +311,8 @@ class TranscriptStabilityAnalyzer:
311
  def merge_chunks(self, chunks: List[TranscriptChunk])->str:
312
  output = list(r.join() for r in chunks if r)
313
  return output
314
-
315
-
316
 
317
  def analysis(self, current: TranscriptChunk, buffer_duration: float) -> Iterator[TranscriptResult]:
318
  current = TranscriptChunk(items=current, separator=self._separator)
@@ -344,13 +344,13 @@ class TranscriptStabilityAnalyzer:
344
  # logger.debug("==========================")
345
 
346
  if curr_first and prev_first:
347
-
348
  core = curr_first.compare(prev_first)
349
  has_punctuation = curr_first.has_punctuation()
350
  if core >= 0.8 and has_punctuation:
351
  yield from self._yield_commit_results(curr_first, curr_rest, curr_first.is_end_sentence())
352
  return
353
-
354
  yield TranscriptResult(
355
  seg_id=self._transcript_buffer.get_seg_id(),
356
  context=self._transcript_buffer.current_not_commit_text
@@ -377,7 +377,7 @@ class TranscriptStabilityAnalyzer:
377
  stable_str_list = [stable_chunk.join()] if hasattr(stable_chunk, "join") else self.merge_chunks(stable_chunk)
378
  remaining_str_list = self.merge_chunks(remaining_chunks)
379
  frame_cut_index = stable_chunk[-1].get_buffer_index() if isinstance(stable_chunk, list) else stable_chunk.get_buffer_index()
380
-
381
  prev_seg_id = self._transcript_buffer.get_seg_id()
382
  commit_paragraph = self._transcript_buffer.update_and_commit(stable_str_list, remaining_str_list, is_end_sentence)
383
  logger.debug(f"current buffer: {self._transcript_buffer.__dict__}")
@@ -401,4 +401,4 @@ class TranscriptStabilityAnalyzer:
401
  cut_index=frame_cut_index,
402
  context=self._transcript_buffer.current_not_commit_text,
403
  )
404
-
 
34
  class TranscriptToken:
35
  """表示一个转录片段,包含文本和时间信息"""
36
  text: str # 转录的文本内容
37
+ t0: int # 开始时间(百分之一秒)
38
+ t1: int # 结束时间(百分之一秒)
39
 
40
  def is_punctuation(self):
41
  """检查文本是否包含标点符号"""
42
  return REGEX_MARKERS.search(self.text.strip()) is not None
43
+
44
  def is_end(self):
45
  """检查文本是否为句子结束标记"""
46
  return SENTENCE_END_PATTERN.search(self.text.strip()) is not None
47
+
48
  def is_pause(self):
49
  """检查文本是否为暂停标记"""
50
  return PAUSEE_END_PATTERN.search(self.text.strip()) is not None
 
86
  if not ck.only_punctuation()
87
  ]
88
 
89
+
90
  def get_split_first_rest(self, mode: SplitMode):
91
  chunks = self.split_by(mode)
92
  fisrt_chunk = chunks[0] if chunks else self
93
  rest_chunks = chunks[1:] if chunks else None
94
  return fisrt_chunk, rest_chunks
95
+
96
  def puncation_numbers(self) -> int:
97
  """计算片段中标点符号的数量"""
98
  return sum(1 for seg in self.items if seg.is_punctuation())
 
104
  def join(self) -> str:
105
  """将片段连接为一个字符串"""
106
  return self.separator.join(seg.text for seg in self.items)
107
+
108
  def compare(self, chunk: Optional['TranscriptChunk'] = None) -> float:
109
  """比较当前片段与另一个片段的相似度"""
110
  if not chunk:
111
  return 0
112
+
113
  score = self._calculate_similarity(self.join(), chunk.join())
114
  logger.debug(f"Compare: {self.join()} vs {chunk.join()} : {score}")
115
  return score
116
+
117
  def only_punctuation(self)->bool:
118
  return all(seg.is_punctuation() for seg in self.items)
119
+
120
  def has_punctuation(self) -> bool:
121
  return any(seg.is_punctuation() for seg in self.items)
122
+
123
  def get_buffer_index(self) -> int:
124
  return self.items[-1].buffer_index()
125
+
126
  def is_end_sentence(self) ->bool:
127
  return self.items[-1].is_end()
128
+
129
 
130
  class TranscriptHistory:
131
  """管理转录片段的历史记录"""
132
 
133
  def __init__(self) -> None:
134
  self.history = collections.deque(maxlen=2) # 存储最近的两个片段
135
+
136
  def add(self, chunk: TranscriptChunk):
137
  """添加新的片段到历史记录"""
138
  self.history.appendleft(chunk)
 
144
  def lastest_chunk(self):
145
  """获取最后一个片段"""
146
  return self.history[-1]
147
+
148
  def clear(self):
149
  self.history.clear()
150
 
 
168
 
169
  def get_seg_id(self) -> int:
170
  return self._current_seg_id
171
+
172
  @property
173
  def current_sentences_length(self) -> int:
174
  count = 0
 
178
  else:
179
  count += len(item)
180
  return count
181
+
182
  def update_pending_text(self, text: str) -> None:
183
  """更新临时缓冲字符串"""
184
  self._buffer = text
 
192
  def commit_paragraph(self) -> None:
193
  """
194
  提交当前短句为完整段落(如句子结束)
195
+
196
  Args:
197
  end_of_sentence: 是否为句子结尾(如检测到句号)
198
  """
199
+
200
  count = 0
201
  current_sentences = []
202
  while len(self._sentences): # and count < 20:
 
219
  output = self.split_and_join(
220
  text.replace(
221
  self._separator, ""))
222
+
223
  logger.debug("==== rebuild string ====")
224
  logger.debug(text)
225
  logger.debug(output)
226
 
227
  return output
228
+
229
  @staticmethod
230
  def split_and_join(text):
231
  tokens = []
 
264
  for stable_str in stable_strings:
265
  self.update_pending_text(stable_str)
266
  self.commit_line()
267
+
268
  current_text_len = len(self.current_not_commit_text.split(self._separator)) if self._separator else len(self.current_not_commit_text)
269
  # current_text_len = len(self.current_not_commit_text.split(self._separator))
270
  self.update_pending_text(remaining_string)
 
279
  self.update_pending_text(remaining_string)
280
  return False
281
 
282
+
283
  @property
284
  def un_commit_paragraph(self) -> str:
285
  """当前短句组合"""
 
298
  @property
299
  def current_not_commit_text(self) -> str:
300
  return self.un_commit_paragraph + self.pending_text
301
+
302
 
303
 
304
  class TranscriptStabilityAnalyzer:
 
311
  def merge_chunks(self, chunks: List[TranscriptChunk])->str:
312
  output = list(r.join() for r in chunks if r)
313
  return output
314
+
315
+
316
 
317
  def analysis(self, current: TranscriptChunk, buffer_duration: float) -> Iterator[TranscriptResult]:
318
  current = TranscriptChunk(items=current, separator=self._separator)
 
344
  # logger.debug("==========================")
345
 
346
  if curr_first and prev_first:
347
+
348
  core = curr_first.compare(prev_first)
349
  has_punctuation = curr_first.has_punctuation()
350
  if core >= 0.8 and has_punctuation:
351
  yield from self._yield_commit_results(curr_first, curr_rest, curr_first.is_end_sentence())
352
  return
353
+
354
  yield TranscriptResult(
355
  seg_id=self._transcript_buffer.get_seg_id(),
356
  context=self._transcript_buffer.current_not_commit_text
 
377
  stable_str_list = [stable_chunk.join()] if hasattr(stable_chunk, "join") else self.merge_chunks(stable_chunk)
378
  remaining_str_list = self.merge_chunks(remaining_chunks)
379
  frame_cut_index = stable_chunk[-1].get_buffer_index() if isinstance(stable_chunk, list) else stable_chunk.get_buffer_index()
380
+
381
  prev_seg_id = self._transcript_buffer.get_seg_id()
382
  commit_paragraph = self._transcript_buffer.update_and_commit(stable_str_list, remaining_str_list, is_end_sentence)
383
  logger.debug(f"current buffer: {self._transcript_buffer.__dict__}")
 
401
  cut_index=frame_cut_index,
402
  context=self._transcript_buffer.current_not_commit_text,
403
  )
404
+