Grinding committed on
Commit
afc4870
·
verified ·
1 Parent(s): 27f255b

Update app/processing.py

Browse files
Files changed (1) hide show
  1. app/processing.py +3 -2
app/processing.py CHANGED
@@ -96,7 +96,7 @@ async def process_transcript_chunk(chunk_index: int, chunk_text: str):
96
  try:
97
  summary_task = asyncio.to_thread(
98
  groq_client.chat.completions.create,
99
- model="qwen/qwen3-32b",
100
  messages=[{"role": "system", "content": CHUNK_SUMMARIZATION_SYSTEM_PROMPT}, {"role": "user", "content": chunk_text}],
101
  temperature=0.2,
102
  max_tokens=512
@@ -104,7 +104,7 @@ async def process_transcript_chunk(chunk_index: int, chunk_text: str):
104
 
105
  action_task = asyncio.to_thread(
106
  groq_client.chat.completions.create,
107
- model="qwen/qwen3-32b",
108
  messages=[{"role": "system", "content": ACTION_ITEMS_SYSTEM_PROMPT}, {"role": "user", "content": chunk_text}],
109
  temperature=0.1,
110
  max_tokens=512,
@@ -214,6 +214,7 @@ async def run_pipeline(task_id: str, file_path: Path, tasks_db: dict):
214
  model="qwen/qwen3-32b",
215
  messages=[{"role": "system", "content": FINAL_SUMMARIZATION_SYSTEM_PROMPT}, {"role": "user", "content": combined_summaries}],
216
  temperature=0.2,
 
217
  max_tokens=1024
218
  )
219
 
 
96
  try:
97
  summary_task = asyncio.to_thread(
98
  groq_client.chat.completions.create,
99
+ model="gemma2-9b-it",
100
  messages=[{"role": "system", "content": CHUNK_SUMMARIZATION_SYSTEM_PROMPT}, {"role": "user", "content": chunk_text}],
101
  temperature=0.2,
102
  max_tokens=512
 
104
 
105
  action_task = asyncio.to_thread(
106
  groq_client.chat.completions.create,
107
+ model="llama-3.1-8b-instant",
108
  messages=[{"role": "system", "content": ACTION_ITEMS_SYSTEM_PROMPT}, {"role": "user", "content": chunk_text}],
109
  temperature=0.1,
110
  max_tokens=512,
 
214
  model="qwen/qwen3-32b",
215
  messages=[{"role": "system", "content": FINAL_SUMMARIZATION_SYSTEM_PROMPT}, {"role": "user", "content": combined_summaries}],
216
  temperature=0.2,
217
+ reasoning_format="hidden",
218
  max_tokens=1024
219
  )
220