Grinding committed on
Commit
b61bb62
·
verified ·
1 Parent(s): b6227b0

Update app/processing.py

Browse files
Files changed (1) hide show
  1. app/processing.py +35 -4
app/processing.py CHANGED
@@ -26,8 +26,35 @@ except Exception as e:
26
  logger.error(f"Failed to initialize Groq client: {e}")
27
 
28
  # --- Prompts ---
29
- SUMMARIZATION_SYSTEM_PROMPT = "..." # Keep your prompt as is
30
- ACTION_ITEMS_SYSTEM_PROMPT = "..." # Keep your prompt as is
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
  # --- Core Functions ---
33
 
@@ -104,18 +131,22 @@ async def run_pipeline(task_id: str, file_path: Path, tasks_db: dict):
104
 
105
  summary_task = asyncio.to_thread(
106
  groq_client.chat.completions.create,
107
- model="llama3-70b-8192",
108
  messages=[{"role": "system", "content": SUMMARIZATION_SYSTEM_PROMPT}, {"role": "user", "content": full_transcript}],
109
  temperature=0.2,
 
 
110
  max_tokens=1024
111
  )
112
 
113
  action_item_task = asyncio.to_thread(
114
  groq_client.chat.completions.create,
115
- model="llama3-70b-8192",
116
  messages=[{"role": "system", "content": ACTION_ITEMS_SYSTEM_PROMPT}, {"role": "user", "content": full_transcript}],
117
  temperature=0.1,
 
118
  max_tokens=1024,
 
119
  response_format={"type": "json_object"}
120
  )
121
 
 
26
  logger.error(f"Failed to initialize Groq client: {e}")
27
 
28
  # --- Prompts ---
29
+ SUMMARIZATION_SYSTEM_PROMPT = """
30
+ You are an expert AI assistant specializing in creating concise, structured, and insightful summaries of meeting and lecture transcripts. Your goal is to distill the most critical information into a format that is easy to read and act upon.
31
+
32
+ Instructions:
33
+ 1. **Identify Core Themes**: Begin by identifying the main topics and objectives discussed.
34
+ 2. **Extract Key Decisions**: Pinpoint any decisions that were made, including the rationale behind them if available.
35
+ 3. **Highlight Main Outcomes**: Detail the primary results or conclusions reached during the discussion.
36
+ 4. **Structure the Output**: Present the summary in a clean, professional format. Use bullet points for clarity.
37
+ 5. **Maintain Neutrality**: The summary should be objective and free of personal interpretation or bias.
38
+ """
39
+ ACTION_ITEMS_SYSTEM_PROMPT = """
40
+ You are a highly specialized AI assistant tasked with identifying and extracting actionable tasks, commitments, and deadlines from a meeting or lecture transcript. Your output must be clear, concise, and formatted as a JSON object.
41
+
42
+ Instructions:
43
+ 1. **Identify Actionable Language**: Scan the text for phrases indicating a task, such as "will send," "is responsible for," "we need to," "I'll follow up on," etc.
44
+ 2. **Extract Key Components**: For each action item, identify the assigned person (if mentioned), the specific task, and any deadlines.
45
+ 3. **Format as JSON**: Return a single JSON object with a key "action_items". The value should be a list of strings, where each string is a clearly defined action item.
46
+ 4. **Be Precise**: If no specific person is assigned, state the action generally. If no deadline is mentioned, do not invent one.
47
+ 5. **Handle No Actions**: If no action items are found, return a JSON object with an empty list: {"action_items": []}.
48
+
49
+ Example Output:
50
+ {
51
+ "action_items": [
52
+ "Alice will send the final budget report by Friday.",
53
+ "Bob is responsible for updating the project timeline.",
54
+ "The marketing strategy needs to be finalized by next week's meeting."
55
+ ]
56
+ }
57
+ """
58
 
59
  # --- Core Functions ---
60
 
 
131
 
132
  summary_task = asyncio.to_thread(
133
  groq_client.chat.completions.create,
134
+ model="qwen/qwen3-32b",
135
  messages=[{"role": "system", "content": SUMMARIZATION_SYSTEM_PROMPT}, {"role": "user", "content": full_transcript}],
136
  temperature=0.2,
137
+ reasoning_effort="default",
138
+ reasoning_format="hidden",
139
  max_tokens=1024
140
  )
141
 
142
  action_item_task = asyncio.to_thread(
143
  groq_client.chat.completions.create,
144
+ model="qwen/qwen3-32b",
145
  messages=[{"role": "system", "content": ACTION_ITEMS_SYSTEM_PROMPT}, {"role": "user", "content": full_transcript}],
146
  temperature=0.1,
147
+ reasoning_effort="default",
148
  max_tokens=1024,
149
+ reasoning_format="hidden",
150
  response_format={"type": "json_object"}
151
  )
152