Muthuraja18 commited on
Commit
e32a9cc
·
verified ·
1 Parent(s): ce92fed
Files changed (1) hide show
  1. app.py +144 -343
app.py CHANGED
@@ -1,362 +1,163 @@
1
- import PyPDF2
2
  import streamlit as st
3
- from transformers import pipeline
4
- import io
5
- from datetime import datetime, timedelta
6
- import gspread
7
- from google.oauth2.service_account import Credentials
8
- import pandas as pd
9
- import matplotlib.pyplot as plt
10
- from io import BytesIO
11
- from reportlab.lib.pagesizes import letter
12
- from reportlab.pdfgen import canvas
13
- import speech_recognition as sr # Speech recognition package
14
- import plotly.express as px # For interactive charts
15
-
16
- # Google Sheets setup
17
- SCOPE = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"]
18
- CREDS_PATH = "modern-cycling-444916-g6-82c207d3eb47.json" # Provide your Google credentials path
19
-
20
- # Initialize the Hugging Face QA pipeline
21
- qa_pipeline = pipeline("question-answering")
 
 
 
 
 
 
 
 
 
 
22
 
23
- # Initialize Sentiment Analysis Pipeline using Hugging Face
24
- sentiment_model = pipeline('sentiment-analysis')
25
 
26
- # Initialize Google Sheets connection
27
- def initialize_google_sheets():
28
- credentials = Credentials.from_service_account_file(CREDS_PATH, scopes=SCOPE)
29
  try:
30
- client = gspread.authorize(credentials)
31
- sheet = client.open("SalesStores").sheet1 # Change Google Sheet name to "SalesStores"
32
- return sheet
33
- except gspread.exceptions.APIError as e:
34
- st.error(f"Google Sheets API error: {e}")
35
- return None
36
-
37
- sheet = initialize_google_sheets()
38
-
39
- # Function to extract text from PDF using PyPDF2
40
- def extract_pdf_text_with_pypdf(pdf_file):
41
- pdf_text = ""
42
- with io.BytesIO(pdf_file.read()) as pdf_data:
43
- pdf_reader = PyPDF2.PdfReader(pdf_data)
44
- for page_num in range(len(pdf_reader.pages)):
45
- page = pdf_reader.pages[page_num]
46
- pdf_text += page.extract_text()
47
- return pdf_text.strip()
48
-
49
- # Function to answer a query using Hugging Face's QA pipeline
50
- def answer_query(question, context):
51
- result = qa_pipeline(question=question, context=context)
52
- return result['answer']
53
-
54
- # Function to analyze sentiment using Hugging Face's pre-trained model
55
- def analyze_sentiment(text):
56
- sentiment = sentiment_model(text)[0] # Output is a list of dictionaries
57
- label = sentiment['label']
58
- score = sentiment['score']
59
-
60
- # Define sentiment labels
61
- if label == "POSITIVE" and score > 0.6:
62
- sentiment_description = "Positive"
63
- elif label == "NEGATIVE" and score < 0.4: # Adjust threshold for negative sentiment
64
- sentiment_description = "Negative"
65
- else:
66
- sentiment_description = "Neutral"
67
-
68
- return score, sentiment_description
69
-
70
- # Function to update Google Sheets without product name
71
- def update_sheet_without_product(sentiment_score, sentiment_description, relevant_answer):
72
- if sheet:
73
- timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
74
- sheet.append_row([timestamp, sentiment_description, sentiment_score, relevant_answer, "No Product Name"])
75
- else:
76
- st.error("Google Sheets connection not initialized.")
77
 
78
- # Function to suggest product recommendations based on sentiment or query
79
- def suggest_product_recommendations(sentiment_description, query):
80
- recommendations = []
81
 
82
- # Recognize product types based on the query
83
- if "laptop" in query.lower():
84
- if sentiment_description == "Positive":
85
- recommendations = [
86
- "Check out this high-performance gaming laptop!",
87
- "How about this ultrabook for portability?",
88
- "This laptop has excellent reviews โ€“ perfect for professionals!"
89
- ]
90
- elif sentiment_description == "Negative":
91
- recommendations = [
92
- "Perhaps you might prefer these affordable laptops instead.",
93
- "Looking for a different brand? Check these out!"
94
- ]
95
- else:
96
- recommendations = [
97
- "Here are some of the best laptops available right now."
98
- ]
99
- elif "smartphone" in query.lower():
100
- if sentiment_description == "Positive":
101
- recommendations = [
102
- "This smartphone has fantastic features, check it out!",
103
- "Here's a popular smartphone with excellent camera quality."
104
- ]
105
- elif sentiment_description == "Negative":
106
- recommendations = [
107
- "Maybe youโ€™d like to consider another smartphone brand?",
108
- "These budget smartphones offer better value for money."
109
- ]
110
- else:
111
- recommendations = [
112
- "Hereโ€™s a list of the latest smartphones that might interest you."
113
- ]
114
- elif "headphones" in query.lower():
115
- if sentiment_description == "Positive":
116
- recommendations = [
117
- "These wireless headphones offer superior sound quality.",
118
- "Looking for noise-cancelling headphones? Try these."
119
- ]
120
- elif sentiment_description == "Negative":
121
- recommendations = [
122
- "Consider these alternative headphones with better reviews.",
123
- "These highly-rated, affordable options might suit your needs."
124
- ]
125
- else:
126
- recommendations = [
127
- "Check out these top-rated headphones for music lovers!"
128
- ]
129
- elif "tablet" in query.lower():
130
- if sentiment_description == "Positive":
131
- recommendations = [
132
- "Check out this lightweight tablet with amazing battery life.",
133
- "This tablet has incredible processing power and an excellent screen."
134
- ]
135
- elif sentiment_description == "Negative":
136
- recommendations = [
137
- "Hereโ€™s an alternative tablet with a better display.",
138
- "These budget tablets might be more up your alley."
139
- ]
140
- else:
141
- recommendations = [
142
- "Looking for a tablet? Here are some of the best options right now."
143
- ]
144
- elif "camera" in query.lower():
145
- if sentiment_description == "Positive":
146
- recommendations = [
147
- "This camera offers stunning image quality for professionals.",
148
- "Perfect for vloggers! Check out this high-quality camera."
149
- ]
150
- elif sentiment_description == "Negative":
151
- recommendations = [
152
- "Maybe youโ€™d prefer these budget-friendly cameras with better features.",
153
- "Here are some alternatives for beginner photographers."
154
- ]
155
- else:
156
- recommendations = [
157
- "Explore these top-rated cameras for your photography needs!"
158
- ]
159
  else:
160
- recommendations = [
161
- "Based on your query, here are some great product options across various categories."
162
- ]
163
-
164
- return recommendations
165
-
166
- # Function to filter data by date
167
- def filter_data_by_date(data, date_filter):
168
- if date_filter == "Today":
169
- start_date = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
170
- data = data[data['Timestamp'] >= start_date]
171
- elif date_filter == "One Week":
172
- start_date = datetime.now() - timedelta(weeks=1)
173
- data = data[data['Timestamp'] >= start_date]
174
- return data
175
-
176
- # Function to generate PDF for the call history
177
- def generate_pdf(data):
178
- buffer = BytesIO()
179
- c = canvas.Canvas(buffer, pagesize=letter)
180
- c.setFont("Helvetica", 10)
181
- y_position = 750
182
- c.drawString(30, y_position, "Call History Report")
183
- y_position -= 20
184
- for index, row in data.iterrows():
185
- c.drawString(30, y_position, f"Sentiment: {row['Sentiment']}, Answer: {row['Answer']}, Product: {row['Product Name']}")
186
- y_position -= 15
187
- if y_position <= 40:
188
- c.showPage()
189
- c.setFont("Helvetica", 10)
190
- y_position = 750
191
- c.save()
192
- buffer.seek(0)
193
- return buffer
194
-
195
- # Function to listen to speech and convert it to text
196
- def listen_to_speech():
197
- recognizer = sr.Recognizer()
198
- with sr.Microphone() as source:
199
- recognizer.adjust_for_ambient_noise(source) # Adjust for background noise
200
- st.write("Listening...") # Optional: Add a message to indicate listening state
201
- try:
202
- audio = recognizer.listen(source, timeout=5, phrase_time_limit=10) # Listen for the audio input
203
- st.write("Recognizing...") # Optional: Add a message for recognition process
204
- text = recognizer.recognize_google(audio) # Use Google's speech recognition to convert audio to text
205
- st.write(f"Recognized: {text}")
206
- return text # Return the text detected from the audio
207
- except sr.UnknownValueError:
208
- st.error("Sorry, I could not understand the audio.") # Handle case when the audio is unclear
209
- return None
210
- except sr.RequestError:
211
- st.error("Could not request results from Google Speech Recognition service.") # Handle network issues
212
- return None
213
- except Exception as e:
214
- st.error(f"An error occurred: {e}")
215
- return None
216
-
217
- # Function to suggest related follow-up questions based on the answer
218
- def suggest_related_questions():
219
- related_questions = [
220
- "Can you explain more about the product?",
221
- "What are the features of this product?",
222
- "How does it compare to other products?",
223
- "Can I get more details about the specifications?",
224
- "What is the price of the product?"
225
- ]
226
- return related_questions
227
-
228
- # Dashboard functions
229
- def display_dashboard():
230
- st.title("Customer Query Dashboard")
231
-
232
- # Adding a background color and styles to enhance the dashboard appearance
233
- st.markdown("""
234
- <style>
235
- .stApp {
236
- background-color: #f0f4f8;
237
  }
238
- .stButton>button {
239
- background-color: #4CAF50;
240
- color: white;
241
- border-radius: 10px;
242
- padding: 10px 20px;
243
- }
244
- .stTextInput>div>input {
245
- border-radius: 10px;
246
- border: 2px solid #4CAF50;
247
- padding: 10px;
248
- }
249
- </style>
250
- """, unsafe_allow_html=True)
251
-
252
- # Displaying a greeting message with animation
253
- st.balloons() # Adding confetti animation when the page loads
254
-
255
- if sheet:
256
- data = pd.DataFrame(sheet.get_all_records()) # Load all rows into a DataFrame
257
-
258
- # Ensure the Timestamp column exists and is in datetime format
259
- if 'Timestamp' in data.columns:
260
- data['Timestamp'] = pd.to_datetime(data['Timestamp'])
261
-
262
- # Add a date filter to the dashboard
263
- date_filter = st.selectbox("Filter by Date", ["All Time", "Today", "One Week"])
264
-
265
- # Filter data based on the selected date range
266
- if date_filter != "All Time":
267
- data = filter_data_by_date(data, date_filter)
268
-
269
- # Check if the required columns are present
270
- if 'Sentiment' in data.columns and 'Answer' in data.columns:
271
- # Filter by product (Amazon or Flipkart)
272
- product_filter = st.selectbox("Select Product", ["All", "Amazon", "Flipkart"])
273
-
274
- if product_filter != "All":
275
- data = data[data['Product Name'] == product_filter]
276
-
277
- # Plot sentiment distribution
278
- sentiment_counts = data['Sentiment'].value_counts()
279
-
280
- # Plot Sentiment Distribution using Plotly for better interactivity
281
- st.subheader("Sentiment Distribution")
282
- fig = px.bar(x=sentiment_counts.index, y=sentiment_counts.values,
283
- labels={'x': 'Sentiment', 'y': 'Frequency'},
284
- color=sentiment_counts.index,
285
- color_discrete_map={"POSITIVE": "green", "NEGATIVE": "red", "NEUTRAL": "gray"})
286
- st.plotly_chart(fig)
287
-
288
- # Call Activity Statistics
289
- total_calls = len(data)
290
- avg_sentiment = data['Sentiment'].apply(lambda x: 1 if x == 'Positive' else -1 if x == 'Negative' else 0).mean()
291
- avg_sentiment = round(avg_sentiment, 2)
292
-
293
- st.subheader("Call Activity Statistics")
294
- st.write(f"Total Calls: {total_calls}")
295
- st.write(f"Average Sentiment: {avg_sentiment}")
296
-
297
- # Download option for the entire history (PDF)
298
- pdf = generate_pdf(data)
299
- st.download_button(
300
- label="Download Call History as PDF",
301
- data=pdf,
302
- file_name="call_history.pdf",
303
- mime="application/pdf"
304
- )
305
-
306
- else:
307
- st.error("The required columns (Sentiment, Answer) are not found in the data.")
308
- st.write("Check the data structure in the Google Sheet to make sure the columns are correct.")
309
-
310
- # Main Streamlit UI and workflow
311
- def main():
312
- st.title('Real-Time Customer Query Analysis & Call History')
313
-
314
- # Sidebar Navigation
315
- sidebar_option = st.sidebar.selectbox("Select an Option", ["Dashboard", "Call Analysis"])
316
 
317
- if sidebar_option == "Dashboard":
318
- display_dashboard()
 
319
 
320
- elif sidebar_option == "Call Analysis":
321
- # Upload PDF file
322
- uploaded_pdf = st.file_uploader("Upload a PDF file", type="pdf")
323
- if uploaded_pdf:
324
- pdf_text = extract_pdf_text_with_pypdf(uploaded_pdf)
325
 
326
- if not pdf_text:
327
- st.error("No text could be extracted from the PDF.")
328
- return
329
 
330
- # Speech recognition button
331
- if st.button("Start Speech Recognition"):
332
- user_input = listen_to_speech()
333
- if user_input:
334
- # Sentiment Analysis
335
- sentiment_score, sentiment_description = analyze_sentiment(user_input)
336
 
337
- # Answer the query using the Hugging Face QA pipeline
338
- answer = answer_query(user_input, pdf_text)
339
- st.write(f"Answer: {answer}")
340
 
341
- # Display Sentiment Result
342
- st.write(f"Sentiment: {sentiment_description} (Score: {sentiment_score:.2f})")
 
343
 
344
- # Get product recommendations
345
- recommendations = suggest_product_recommendations(sentiment_description, user_input)
346
- st.subheader("Product Recommendations")
347
- for recommendation in recommendations:
348
- st.write(f"- {recommendation}")
349
 
350
- # Store the query and the response in Google Sheets
351
- update_sheet_without_product(sentiment_score, sentiment_description, answer)
 
352
 
353
- st.write("Query and answer saved in Call History!")
 
354
 
355
- # Suggest related follow-up questions
356
- st.subheader("Related Follow-up Questions")
357
- related_questions = suggest_related_questions()
358
- for question in related_questions:
359
- st.write(f"- {question}")
360
 
361
- if __name__ == "__main__":
362
- main()
 
 
1
import streamlit as st
import requests
import os
from hashlib import sha256
import random

# ========== CONFIG ==========
# SECURITY: never commit API keys to source control — the previous revision
# embedded a live Groq key here. Read it from the environment instead
# (export GROQ_API_KEY before launching the app); the old key should be revoked.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "")
GROQ_MODEL = "llama3-8b-8192"  # Recommended current Groq model

# ========== STATE ==========
# Persist the most recent interview outcome across Streamlit reruns.
if "last_result" not in st.session_state:
    st.session_state.last_result = None  # dict of per-round percentage scores
if "last_candidate" not in st.session_state:
    st.session_state.last_candidate = None  # filename of the last uploaded resume
16
+
17
+ # ========== GROQ HELPERS ==========
18
+
19
def generate_questions(domain: str, round_type: str):
    """Ask the Groq chat API for 3 interview questions.

    Args:
        domain: Candidate's domain (e.g. "Software", "Data Science").
        round_type: Kind of round, e.g. "aptitude" or "HR".

    Returns:
        list[str]: One question per non-empty response line. Falls back to
        generic placeholder questions on any API or parsing failure so the
        interview flow keeps working.
    """
    prompt = f"""
Generate 3 {round_type} interview questions for a candidate in the domain of {domain}.
Questions should be clear, concise, and assess relevant skills.
"""
    headers = {"Authorization": f"Bearer {GROQ_API_KEY}", "Content-Type": "application/json"}
    data = {
        "model": GROQ_MODEL,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": 0.7,
        "max_tokens": 400,
    }

    try:
        # timeout= prevents the Streamlit script from hanging forever on a
        # stalled connection (the original call had no timeout).
        response = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            headers=headers,
            json=data,
            timeout=30,
        )
        content = response.json()['choices'][0]['message']['content']
        return [q.strip("- ").strip() for q in content.split("\n") if q.strip()]
    except Exception as e:
        # NOTE: the previous version called response.json() again inside this
        # handler, which could itself raise (non-JSON body, or `response`
        # unbound after a connection error) and mask the real failure.
        st.error(f"Groq API Error: {e}")
        return ["Question 1", "Question 2", "Question 3"]
41
+
42
def generate_programming_question(domain: str, language: str):
    """Ask the Groq chat API for one programming question.

    Args:
        domain: Candidate's domain used to contextualize the question.
        language: Programming language the question should target.

    Returns:
        str: The question text, or a generic fallback question on any API
        or parsing failure so the interview flow keeps working.
    """
    prompt = f"""
Generate 1 beginner-to-intermediate level programming interview question in {language} for a candidate applying in the domain of {domain}.
Provide only the question without solution or explanation.
"""
    headers = {"Authorization": f"Bearer {GROQ_API_KEY}", "Content-Type": "application/json"}
    data = {
        "model": GROQ_MODEL,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": 0.7,
        "max_tokens": 300,
    }

    try:
        # timeout= prevents the Streamlit script from hanging forever on a
        # stalled connection (the original call had no timeout).
        response = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            headers=headers,
            json=data,
            timeout=30,
        )
        return response.json()['choices'][0]['message']['content']
    except Exception as e:
        # Avoid re-calling response.json() here: it can raise again (non-JSON
        # body, or `response` unbound on connection error) and hide the error.
        st.error(f"Groq API Error: {e}")
        return "Write a function to reverse a string."
63
+
64
+ # ========== CORE FUNCTIONALITY ==========
65
+
66
def check_resume_originality(uploaded_file):
    """Score a resume's originality by hashing its bytes.

    The SHA-256 digest is compared against a (currently dummy) list of
    previously-seen resume hashes; a match scores low, anything new scores high.
    """
    digest = sha256(uploaded_file.read()).hexdigest()
    known_hashes = ["abc123", "def456"]  # dummy values; swap in a real hash database
    if digest in known_hashes:
        return 20
    return 95
71
+
72
def save_to_crm(name, domain, result):
    """Record a candidate's outcome in the CRM.

    Placeholder for CRM integration - replace with real CRM API calls.
    Currently just logs the outcome to stdout.
    """
    print(f"[CRM] Candidate: {name}, Domain: {domain}, Result: {result}")
75
+
76
def show_dashboard():
    """Render the latest candidate's interview summary, or a warning if none."""
    st.title("๐Ÿ“Š Dashboard")
    latest = st.session_state.last_result
    if not latest:
        # Nothing recorded yet — nothing to summarize.
        st.warning("No interview data available yet.")
        return
    st.subheader("Latest Candidate Summary")
    overall = sum(latest.values()) / len(latest)
    st.metric("Overall Score", f"{overall:.2f}%")
    st.progress(int(overall))
    for metric_key, pct in latest.items():
        st.write(f"**{metric_key.replace('_', ' ').title()}**: {pct:.2f}%")
    st.info(f"Last candidate: {st.session_state.last_candidate}")
88
+
89
def start_interview(domain, language):
    """Run the four interview rounds and render a final summary.

    Args:
        domain: Candidate domain used to tailor the generated questions.
        language: Programming language for the coding round.

    Returns:
        dict: Keys 'aptitude_score', 'code_score', 'hr_score',
        'communication_score' — each a 0-100 percentage.
    """
    result = {}

    st.subheader("๐Ÿ”ข Round 1: Aptitude")
    aptitude_qs = generate_questions(domain, "aptitude")
    aptitude_score = 0
    for i, q in enumerate(aptitude_qs):
        ans = st.text_input(f"Aptitude Q{i+1}: {q}", key=f"apt{i}")
        if ans:
            aptitude_score += 1  # placeholder: any non-empty answer counts as correct
    # max(..., 1) guards against ZeroDivisionError if the API ever returns
    # an empty question list.
    result['aptitude_score'] = (aptitude_score / max(len(aptitude_qs), 1)) * 100

    st.subheader(f"๐Ÿ’ป Round 2: Programming in {language}")
    prog_q = generate_programming_question(domain, language)
    st.markdown(f"**Problem:** {prog_q}")
    code_ans = st.text_area(f"Write your solution in {language}", key="code")
    # Crude heuristic: reward answers that at least look like real code.
    result['code_score'] = 90 if ("def" in code_ans or "class" in code_ans) and len(code_ans) > 20 else 40

    st.subheader("๐Ÿ’ฌ Round 3: HR Interview")
    hr_qs = generate_questions(domain, "HR")
    hr_score = 0
    for i, q in enumerate(hr_qs):
        ans = st.text_area(f"HR Q{i+1}: {q}", key=f"hr{i}")
        hr_score += len(ans.split()) > 15  # answers longer than 15 words count
    result['hr_score'] = (hr_score / max(len(hr_qs), 1)) * 100

    st.subheader("๐Ÿ—ฃ๏ธ Round 4: Communication")
    # Placeholder score until a real communication assessment exists.
    result['communication_score'] = random.randint(70, 90)

    # --- Show final marks summary ---
    st.markdown("---")
    st.header("๐Ÿ“‹ Interview Summary")
    rounds = {
        "Aptitude": result['aptitude_score'],
        "Programming": result['code_score'],
        "HR Interview": result['hr_score'],
        "Communication": result['communication_score'],
    }

    total = sum(rounds.values()) / len(rounds)
    for round_name, score in rounds.items():
        st.write(f"**{round_name}:** {score:.2f}%")

    st.write(f"### Overall Score: {total:.2f}%")
    st.progress(int(total))

    return result
 
 
136
 
137
# ========== MAIN APP ==========
# Top-level Streamlit script: configure the page, then route between the
# interview flow and the dashboard via the sidebar.
st.set_page_config(page_title="AI Interview System", layout="centered")
page = st.sidebar.radio("๐Ÿ“Œ Navigate", ["Interview", "Dashboard"])

if page == "Interview":
    st.title("๐Ÿง  AI Interview System")

    # Candidate inputs: resume upload plus domain/language choices.
    uploaded_resume = st.file_uploader("๐Ÿ“„ Upload Resume (PDF/DOCX)", type=['pdf', 'docx'])
    domain = st.selectbox("๐ŸŽฏ Select your domain", ["Software", "Data Science", "Networking", "AI/ML"])
    language = st.selectbox("๐Ÿ’ป Select programming language", ["Python", "Java", "C++", "JavaScript"])

    # Only proceed once a resume is uploaded (selectboxes always have a value).
    if uploaded_resume and domain and language:
        with st.expander("๐Ÿ” Step 1: Resume Originality Check"):
            score = check_resume_originality(uploaded_resume)
            st.info(f"Resume Originality Score: **{score}%**")

        if st.button("๐Ÿš€ Start Interview"):
            result = start_interview(domain, language)
            st.success("โœ… Interview Completed!")

            # Persist the outcome so the Dashboard page can show it on a
            # later rerun.
            st.session_state.last_result = result
            st.session_state.last_candidate = uploaded_resume.name

            save_to_crm(uploaded_resume.name, domain, result)

elif page == "Dashboard":
    show_dashboard()