pratikshahp commited on
Commit
b2a0fc5
·
verified ·
1 Parent(s): 08f3ef4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -41
app.py CHANGED
@@ -1,58 +1,82 @@
1
  import json
2
  import os
 
 
3
  from openai import OpenAI
 
4
  from dotenv import load_dotenv
5
 
6
# Load secrets from a local .env file so the API key never lives in source.
load_dotenv()

# Shared OpenAI client, authenticated via the OPENAI_API_KEY env var.
api_key = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=api_key)
10
 
11
def get_weather(city: str):
    """Return a canned weather report for *city*.

    Only a handful of demo cities are known; any other name yields a
    fallback message instead of raising.
    """
    reports = {
        "New York": "☀️ Sunny, 25°C",
        "London": "🌧️ Rainy, 18°C",
        "Mumbai": "⛅ Cloudy, 30°C",
    }
    try:
        return reports[city]
    except KeyError:
        return "Weather data not available for this city."
19
-
20
def chat_with_llm(user_query):
    """Send *user_query* to the model, honoring an optional weather tool call.

    When the model elects to call ``get_weather`` the tool result is
    returned directly; otherwise the model's plain-text reply is returned.
    """
    weather_schema = {
        "name": "get_weather",
        "description": "Fetch weather details for a given city.",
        "parameters": {
            "type": "object",
            "properties": {
                "city": {"type": "string", "description": "Name of the city"}
            },
            "required": ["city"],
        },
    }

    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": user_query}],
        functions=[weather_schema],
        function_call="auto",  # GPT decides whether to call the function
    )

    reply = response.choices[0].message
    call = getattr(reply, "function_call", None)
    if call:
        try:
            parsed = json.loads(call.arguments)
            if parsed.get("city"):
                return get_weather(parsed["city"])
        except Exception as e:
            return f"❌ Error processing function call: {str(e)}"

    # No function call requested: fall back to the model's normal answer.
    return reply.content


# Example Queries
print(chat_with_llm("What's the weather like in New York?"))  # Calls function
print(chat_with_llm("Tell me a joke."))  # Normal LLM response
 
1
  import json
2
  import os
3
+ import gradio as gr
4
+ import pandas as pd
5
  from openai import OpenAI
6
+ from transformers import pipeline
7
  from dotenv import load_dotenv
8
 
 
# Load secrets from a local .env file so the API key never lives in source.
load_dotenv()

# Shared OpenAI client, authenticated via the OPENAI_API_KEY env var.
api_key = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=api_key)

# Sentiment classifier used to label each feedback row (positive/neutral/negative).
pipe = pipeline("text-classification", model="cardiffnlp/twitter-roberta-base-sentiment-latest")
14
+
def process_csv(file):
    """Load an employee-feedback CSV and tag each row with a sentiment label.

    Parameters:
        file: path or file-like object from the Gradio File widget
            (None when nothing has been uploaded yet).

    Returns:
        tuple: ``({"df": df}, status_message)`` on success, or
        ``(None, error_message)`` when the upload is missing or unusable.
    """
    # Guard: clicking "Process CSV" before uploading passes None.
    if file is None:
        return None, "❌ Error: Please upload a CSV file first."
    try:
        df = pd.read_csv(file)
    except Exception as e:
        # Malformed/empty file — report instead of crashing the UI callback.
        return None, f"❌ Error reading CSV: {str(e)}"
    if "Feedback" not in df.columns or "Employee" not in df.columns:
        return None, "❌ Error: CSV must contain 'Employee' and 'Feedback' columns."
    # Blank CSV cells become float NaN; the HF pipeline requires str input,
    # so coerce every feedback value before classifying.
    df["Sentiment"] = df["Feedback"].apply(lambda x: pipe(str(x))[0]["label"])
    return {"df": df}, " CSV processed!"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
def predict_attrition_risk(employee_name: str, sentiment: str):
    """Map a sentiment label onto a coarse attrition-risk bucket for one employee.

    The comparison is case-insensitive; anything outside the three known
    labels is reported as 'Unknown Sentiment'.
    """
    label = sentiment.lower()
    if label == "positive":
        risk = "Low Risk"
    elif label == "neutral":
        risk = "Medium Risk"
    elif label == "negative":
        risk = "High Risk"
    else:
        risk = "Unknown Sentiment"
    return f"{employee_name}: {risk}"
25
+
def analyze_attrition_with_llm(df_dict, hr_query):
    """Answer an HR query about attrition risk using the processed CSV.

    The sentiment-labelled employee data is handed to the LLM alongside the
    query; when the model decides to call ``predict_attrition_risk`` the
    mapped risk string is returned, otherwise a polite refusal.

    Parameters:
        df_dict: ``{"df": DataFrame}`` produced by process_csv, or None.
        hr_query: free-text HR question (typically an employee name).

    Returns:
        str: risk verdict, error message, or refusal text.
    """
    if df_dict is None or "df" not in df_dict:
        return "❌ Error: No processed data. Upload a CSV first."

    df = df_dict["df"]
    # str() guards against non-string Employee cells (numeric IDs, NaN from
    # blank rows), which would otherwise raise AttributeError on .strip().
    employees_data = {str(row["Employee"]).strip(): row["Sentiment"] for _, row in df.iterrows()}

    # LLM function calling: let the model extract the employee + sentiment.
    prompt = f"HR Query: {hr_query}\nEmployees Data: {json.dumps(employees_data, indent=2)}"
    response = client.chat.completions.create(
        model="gpt-4-turbo",
        messages=[{"role": "user", "content": prompt}],
        functions=[
            {
                "name": "predict_attrition_risk",
                "description": "Predicts attrition risk based on sentiment.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "employee_name": {"type": "string", "description": "Employee's name"},
                        "sentiment": {"type": "string", "description": "Extracted sentiment"}
                    },
                    "required": ["employee_name", "sentiment"]
                }
            }
        ],
        function_call="auto"  # the model decides whether to call the function
    )

    message = response.choices[0].message
    if message.function_call:
        try:
            function_call = json.loads(message.function_call.arguments)
            employee_name = function_call.get("employee_name")
            sentiment = function_call.get("sentiment")

            if employee_name and sentiment:
                return predict_attrition_risk(employee_name, sentiment)
        except Exception as e:
            # Arguments may not be valid JSON — surface instead of crashing.
            return f"❌ Error processing LLM function call: {str(e)}"

    return "🤖 I'm sorry, but I can only answer queries related to employee attrition risk."
68
+
69
# --- Gradio UI (component creation order defines the on-page layout) ---
with gr.Blocks() as demo:
    gr.Markdown("<h1>AI-Driven Employee Attrition Risk Analysis</h1>")

    # Upload + preprocessing controls.
    upload_box = gr.File(label="Upload Employee Feedback CSV", file_types=[".csv"])
    btn_process = gr.Button("Process CSV")
    status_md = gr.Markdown()

    # Query controls.
    query_box = gr.Textbox(label="Employee Name or HR Query")
    btn_analyze = gr.Button("Check Attrition Risk")
    result_md = gr.Markdown()

    # Carries the processed DataFrame between the two button clicks.
    table_state = gr.State()

    btn_process.click(process_csv, inputs=upload_box, outputs=[table_state, status_md])
    btn_analyze.click(analyze_attrition_with_llm, inputs=[table_state, query_box], outputs=result_md)

demo.launch(share=True)