File size: 3,653 Bytes
13ea9f1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0cb88d5
13ea9f1
 
 
0cb88d5
 
 
 
13ea9f1
 
 
 
 
 
 
d32148b
13ea9f1
d32148b
13ea9f1
 
 
 
 
 
 
0cb88d5
13ea9f1
0cb88d5
d32148b
0cb88d5
d32148b
 
 
 
 
 
 
 
 
0cb88d5
 
13ea9f1
0cb88d5
13ea9f1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import json
import os
import gradio as gr
import pandas as pd
from openai import OpenAI
from transformers import pipeline
from dotenv import load_dotenv

# Load OPENAI_API_KEY from a local .env so the key never lives in source control.
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")  # may be None; OpenAI calls will fail later if unset
client = OpenAI(api_key=api_key)

# HuggingFace sentiment classifier used by process_csv.
# Assumes labels are positive / neutral / negative (lowercased downstream) — confirm with model card.
pipe = pipeline("text-classification", model="cardiffnlp/twitter-roberta-base-sentiment-latest")

def process_csv(file):
    """Load an employee-feedback CSV and tag each row with a sentiment label.

    Parameters:
        file: path or file-like object for a CSV that must contain
            'Employee' and 'Feedback' columns.

    Returns:
        tuple: ({"df": annotated DataFrame}, success message) on success,
        or (None, error message) when the upload is unreadable or malformed.
    """
    # Guard the read: a corrupt/empty upload must follow the (None, msg)
    # error contract instead of raising inside the Gradio callback.
    try:
        df = pd.read_csv(file)
    except Exception as e:
        return None, f"❌ Error: Could not read CSV ({e})."
    if "Feedback" not in df.columns or "Employee" not in df.columns:
        return None, "❌ Error: CSV must contain 'Employee' and 'Feedback' columns."
    # Drop missing feedback and coerce to str so the HF pipeline never
    # receives NaN (a float), which raises inside the tokenizer.
    df = df.dropna(subset=["Feedback"]).copy()
    df["Sentiment"] = df["Feedback"].astype(str).apply(lambda x: pipe(x)[0]["label"])
    return {"df": df}, "✅ CSV processed!"

def predict_attrition_risk(employee_name: str, sentiment: str):
    """Map a sentiment label to a coarse attrition-risk bucket for one employee.

    Sentiment matching is case-insensitive; unrecognized labels yield
    'Unknown Sentiment'. Returns a single "<name>: <risk>" line.
    """
    normalized = sentiment.lower()
    if normalized == "positive":
        risk = "Low Risk"
    elif normalized == "neutral":
        risk = "Medium Risk"
    elif normalized == "negative":
        risk = "High Risk"
    else:
        risk = "Unknown Sentiment"
    return f"{employee_name}: {risk}"

def analyze_attrition_with_llm(df_dict, hr_query):
    """Answer an HR query about attrition risk via OpenAI function calling.

    Parameters:
        df_dict: state produced by process_csv ({"df": DataFrame}) or None.
        hr_query: free-text HR question.

    Returns:
        str: one "<name>: <risk>" line per requested employee, or an
        error/apology message when data is missing or the query is off-topic.
    """
    if df_dict is None or "df" not in df_dict:
        return "❌ Error: No processed data. Upload a CSV first."
    
    df = df_dict["df"]
    # Name -> sentiment lookup; names are stripped of surrounding whitespace.
    # NOTE(review): lookup is case-sensitive — a query naming "bob" misses "Bob"; confirm desired behavior.
    employees_data = {row["Employee"].strip(): row["Sentiment"] for _, row in df.iterrows()}
    
    # function_call="auto" lets the model decide whether to invoke the
    # predict_attrition_risk tool; off-topic queries should get a plain apology
    # per the system prompt.
    response = client.chat.completions.create(
        model="gpt-4-turbo",
        messages=[
            {"role": "system", "content": "You are an HR assistant. Only respond to queries about employee attrition risk based on sentiment. If the query is irrelevant, reply with an apology."},
            {"role": "user", "content": hr_query}
        ],
        functions=[
            {
                "name": "predict_attrition_risk",
                "description": "Predicts attrition risk based on sentiment.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "employee_names": {"type": "array", "items": {"type": "string"}, "description": "List of employee names"}
                    },
                    "required": ["employee_names"]
                }
            }
        ],
        function_call="auto"
    )
    
    message = response.choices[0].message
    # The model only set function_call when it judged the query relevant;
    # otherwise fall through to the apology below.
    if hasattr(message, "function_call") and message.function_call is not None:
        try:
            # arguments is a JSON string authored by the model — parse defensively.
            function_call = json.loads(message.function_call.arguments)
            employee_names = function_call.get("employee_names", [])
            
            results = []
            for employee_name in employee_names:
                sentiment = employees_data.get(employee_name)
                if sentiment:
                    results.append(predict_attrition_risk(employee_name, sentiment))
                else:
                    results.append(f"{employee_name}: No records found for this employee.")
            
            return "\n".join(results)
        except Exception as e:
            # Malformed JSON or unexpected argument shape from the model.
            return f"❌ Error processing LLM function call: {str(e)}"
    
    return "🤖 I'm sorry, but I can only answer queries related to employee attrition risk."

# --- Gradio UI: upload a CSV, process it into shared state, then query the LLM ---
with gr.Blocks() as demo:
    gr.Markdown("<h1>AI-Driven Employee Attrition Risk Analysis</h1>")
    file_input = gr.File(label="Upload Employee Feedback CSV", file_types=[".csv"])
    process_button = gr.Button("Process CSV")
    process_message = gr.Markdown()  # status string returned by process_csv
    hr_input = gr.Textbox(label="HR Query")
    analyze_button = gr.Button("Ask HR Query")
    output_text = gr.Markdown()  # LLM answer / error text
    df_state = gr.State()  # holds {"df": DataFrame} between interactions

    # Wiring: processing fills df_state; analysis reads it along with the query.
    process_button.click(process_csv, inputs=file_input, outputs=[df_state, process_message])
    analyze_button.click(analyze_attrition_with_llm, inputs=[df_state, hr_input], outputs=output_text)

# share=True additionally exposes a public gradio.live URL beyond localhost.
demo.launch(share=True)