scmlewis committed on
Commit
1eec425
·
verified ·
1 Parent(s): 79c71f6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -63
app.py CHANGED
@@ -1,78 +1,72 @@
 
1
  import gradio as gr
2
- from transformers import pipeline
3
 
4
- # Defensive model loading with error log
5
- try:
6
- classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
7
- except Exception as e:
8
- classifier = None
9
- print(f"Error loading zero-shot classifier: {e}")
10
 
11
- try:
12
- generator = pipeline("text-generation", model="gpt2", max_length=150)
13
- except Exception as e:
14
- generator = None
15
- print(f"Error loading generator: {e}")
16
 
17
- try:
18
- summarizer = pipeline("summarization")
19
- except Exception as e:
20
- summarizer = None
21
- print(f"Error loading summarizer: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
 
23
  def email_assistant(email_text):
24
  try:
25
- candidate_labels = ["Sales", "Support", "Spam", "Finance", "General"]
26
- # Category & Confidence
27
- if classifier:
28
- classification = classifier(email_text, candidate_labels)
29
- category = classification["labels"][0]
30
- confidence = round(classification["scores"][0]*100, 2)
31
- else:
32
- category, confidence = "Error", "Error"
33
- # Priority
 
 
 
 
 
 
 
34
  try:
35
- # Defensive logic for type safety if confidence is not a float
36
- priority = "High" if category in ["Sales", "Support"] and isinstance(confidence, float) and confidence > 80 else "Normal"
 
 
 
 
 
 
 
37
  except Exception:
38
- priority = "Error"
39
- # Suggested Action
40
- forwarding_emails = {
41
- "Sales": "sales@company.com",
42
- "Support": "support@company.com",
43
- "Finance": "finance@company.com",
44
- "Spam": None,
45
- "General": "info@company.com"
46
- }
47
- suggested_action = f"Forward to {forwarding_emails.get(category, 'Unknown')}" if forwarding_emails.get(category) else "No action suggested"
48
- # Draft Response
49
- response_prompt = f"Write a professional reply to this email:\n\n{email_text}\n\nResponse:"
50
- if generator:
51
- response = generator(response_prompt, max_length=150, num_return_sequences=1)[0]["generated_text"]
52
- else:
53
- response = "Error generating response."
54
- # Action Items
55
- if summarizer:
56
- action_items = summarizer(email_text, max_length=50, min_length=10, do_sample=False)[0]["summary_text"]
57
- else:
58
- action_items = "Error extracting action items."
59
- return [
60
- category,
61
- f"{confidence}%",
62
- priority,
63
- suggested_action,
64
- response,
65
- action_items
66
- ]
67
  except Exception as e:
68
- print(f"Error in main function: {e}")
69
- return [
70
- "Error", "Error", "Error", "Error", "Error", "Error"
71
- ]
72
 
73
  with gr.Blocks() as demo:
74
- gr.Markdown("# Email Classification & Response Assistant")
75
- email_input = gr.TextArea(label="Paste your email here")
76
  submit = gr.Button("Analyze Email")
77
  output_category = gr.Textbox(label="Category")
78
  output_confidence = gr.Textbox(label="Confidence")
 
1
+ import os
2
  import gradio as gr
3
+ import requests
4
 
5
+ # Get Groq API key from environment
6
+ GROQ_API_KEY = os.getenv("GROQ_API_KEY")
 
 
 
 
7
 
8
+ # Groq endpoints (you may need to adjust according to the latest docs or your selected model)
9
+ GROQ_COMPLETION_URL = "https://api.groq.com/openai/v1/chat/completions" # for LLM chat/completion
 
 
 
10
 
11
# Utility: Groq API request for LLM tasks (chat/completion)
def groq_completion(prompt, system_prompt=None):
    """Send a chat-completion request to the Groq API and return the reply text.

    Args:
        prompt: The user message to send to the model.
        system_prompt: Optional system message; defaults to a generic
            "helpful assistant" role when omitted.

    Returns:
        The ``content`` string of the first choice in the API response.

    Raises:
        requests.HTTPError: If the API responds with an error status.
        requests.Timeout: If the request exceeds the timeout.
    """
    headers = {
        "Authorization": f"Bearer {GROQ_API_KEY}",
        "Content-Type": "application/json",
    }
    body = {
        # NOTE(review): mixtral-8x7b-32768 has been decommissioned by Groq;
        # switch to a currently supported model (e.g. llama-3.1-8b-instant) —
        # confirm against the Groq model list before deploying.
        "model": "mixtral-8x7b-32768",
        "messages": [
            {"role": "system", "content": system_prompt or "You are a helpful assistant."},
            {"role": "user", "content": prompt},
        ],
        "temperature": 0.3,
        "max_tokens": 512,
    }
    # A timeout is required: without one a stalled API call would hang the
    # Gradio request handler indefinitely.
    response = requests.post(GROQ_COMPLETION_URL, headers=headers, json=body, timeout=60)
    response.raise_for_status()
    return response.json()["choices"][0]["message"]["content"]
29
 
30
# Use LLM prompts for multi-task extraction
def email_assistant(email_text):
    """Analyze an email via the Groq LLM and return six display fields.

    Args:
        email_text: Raw email text pasted by the user (ideally including
            Subject/From/Body).

    Returns:
        A six-element list: [category, confidence, priority, suggested action,
        draft response, action items]. If the model reply is not valid JSON,
        the raw reply is shown in all six fields; on any other failure the
        list is ["Error"] * 6.
    """
    import json

    try:
        # You can extract "Subject" and "From" programmatically, or just
        # instruct the user to paste the email in a template.
        prompt = f"""You will receive an email message. Analyze its metadata and text and extract:
- Category (Sales, Support, Spam, Finance, General)
- Confidence (as a percentage)
- Priority (High/Normal/Low)
- Suggested forwarding email or action
- Professional draft response
- Checklist of action items

Return results in JSON format.

Email: {email_text}
"""
        result = groq_completion(prompt)

        # LLMs frequently wrap JSON replies in markdown code fences
        # (``` or ```json); strip them so parsing doesn't needlessly fall
        # back to the raw-text path.
        cleaned = result.strip()
        if cleaned.startswith("```"):
            fence_lines = cleaned.splitlines()
            if fence_lines and fence_lines[0].startswith("```"):
                fence_lines = fence_lines[1:]
            if fence_lines and fence_lines[-1].strip() == "```":
                fence_lines = fence_lines[:-1]
            cleaned = "\n".join(fence_lines)

        # Try to parse JSON for the Gradio outputs, fallback to plain output.
        try:
            output = json.loads(cleaned)
            return [
                output.get('Category', ''),
                output.get('Confidence', ''),
                output.get('Priority', ''),
                output.get('Suggested Action', ''),
                output.get('Draft Response', ''),
                output.get('Action Items', ''),
            ]
        # JSONDecodeError: reply wasn't JSON; AttributeError: reply parsed to a
        # non-dict (e.g. a list) so .get is unavailable. Narrower than the
        # original bare Exception so real bugs aren't silently swallowed.
        except (json.JSONDecodeError, AttributeError):
            # If not JSON, gracefully show whole model output in all fields
            return [result] * 6
    except Exception as e:
        # Top-level boundary for the Gradio callback: log and degrade to
        # "Error" placeholders rather than crashing the UI.
        print(f"Error in Groq API call: {e}")
        return ["Error"] * 6
 
66
 
67
  with gr.Blocks() as demo:
68
+ gr.Markdown("# Email Classification & Response Assistant (Groq powered)")
69
+ email_input = gr.TextArea(label="Paste your email here (include Subject, From, Body if possible)")
70
  submit = gr.Button("Analyze Email")
71
  output_category = gr.Textbox(label="Category")
72
  output_confidence = gr.Textbox(label="Confidence")