ming Claude committed on
Commit
bd7d2c1
·
1 Parent(s): 0072188

style: apply ruff formatting to structured_summarizer

Browse files

Auto-formatted by ruff during pre-commit hook.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>

app/services/structured_summarizer.py CHANGED
@@ -141,7 +141,9 @@ class StructuredSummarizer:
141
  )
142
  logger.info("✅ Using SDPA attention (optimized)")
143
  except Exception:
144
- logger.warning("⚠️ SDPA not supported, falling back to default attention")
 
 
145
  self.model = AutoModelForCausalLM.from_pretrained(
146
  settings.v4_model_id,
147
  device_map="auto",
@@ -170,7 +172,9 @@ class StructuredSummarizer:
170
  )
171
  logger.info("✅ Using SDPA attention (optimized)")
172
  except Exception:
173
- logger.warning("⚠️ SDPA not supported, falling back to default attention")
 
 
174
  self.model = AutoModelForCausalLM.from_pretrained(
175
  settings.v4_model_id,
176
  torch_dtype=torch.float16,
@@ -191,7 +195,9 @@ class StructuredSummarizer:
191
  )
192
  logger.info("✅ Using SDPA attention (optimized)")
193
  except Exception:
194
- logger.warning("⚠️ SDPA not supported, falling back to default attention")
 
 
195
  self.model = AutoModelForCausalLM.from_pretrained(
196
  settings.v4_model_id,
197
  torch_dtype=torch.float16,
@@ -222,7 +228,9 @@ class StructuredSummarizer:
222
  )
223
  logger.info("✅ Using SDPA attention (optimized)")
224
  except Exception:
225
- logger.warning("⚠️ SDPA not supported, falling back to default attention")
 
 
226
  self.model = AutoModelForCausalLM.from_pretrained(
227
  settings.v4_model_id,
228
  torch_dtype=base_dtype,
@@ -247,7 +255,9 @@ class StructuredSummarizer:
247
  )
248
  logger.info("✅ Using SDPA attention (optimized)")
249
  except Exception:
250
- logger.warning("⚠️ SDPA not supported, falling back to default attention")
 
 
251
  self.model = AutoModelForCausalLM.from_pretrained(
252
  settings.v4_model_id,
253
  torch_dtype=base_dtype,
 
141
  )
142
  logger.info("✅ Using SDPA attention (optimized)")
143
  except Exception:
144
+ logger.warning(
145
+ "⚠️ SDPA not supported, falling back to default attention"
146
+ )
147
  self.model = AutoModelForCausalLM.from_pretrained(
148
  settings.v4_model_id,
149
  device_map="auto",
 
172
  )
173
  logger.info("✅ Using SDPA attention (optimized)")
174
  except Exception:
175
+ logger.warning(
176
+ "⚠️ SDPA not supported, falling back to default attention"
177
+ )
178
  self.model = AutoModelForCausalLM.from_pretrained(
179
  settings.v4_model_id,
180
  torch_dtype=torch.float16,
 
195
  )
196
  logger.info("✅ Using SDPA attention (optimized)")
197
  except Exception:
198
+ logger.warning(
199
+ "⚠️ SDPA not supported, falling back to default attention"
200
+ )
201
  self.model = AutoModelForCausalLM.from_pretrained(
202
  settings.v4_model_id,
203
  torch_dtype=torch.float16,
 
228
  )
229
  logger.info("✅ Using SDPA attention (optimized)")
230
  except Exception:
231
+ logger.warning(
232
+ "⚠️ SDPA not supported, falling back to default attention"
233
+ )
234
  self.model = AutoModelForCausalLM.from_pretrained(
235
  settings.v4_model_id,
236
  torch_dtype=base_dtype,
 
255
  )
256
  logger.info("✅ Using SDPA attention (optimized)")
257
  except Exception:
258
+ logger.warning(
259
+ "⚠️ SDPA not supported, falling back to default attention"
260
+ )
261
  self.model = AutoModelForCausalLM.from_pretrained(
262
  settings.v4_model_id,
263
  torch_dtype=base_dtype,