File size: 8,552 Bytes
56e5c72
7c8bda5
 
 
 
2c0822a
 
370aaa7
 
 
 
2c0822a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56e5c72
7c8bda5
 
653a168
7c8bda5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b2762d2
7c8bda5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b2762d2
7c8bda5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56e5c72
7c8bda5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
653a168
7c8bda5
 
653a168
7c8bda5
b2762d2
653a168
7c8bda5
 
 
b2762d2
7c8bda5
 
b2762d2
653a168
7c8bda5
56e5c72
7c8bda5
56e5c72
7c8bda5
 
 
 
 
 
 
 
56e5c72
7c8bda5
653a168
7c8bda5
 
 
370aaa7
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
import pandas as pd
import requests
import gradio as gr
import logging
from uuid import uuid4
from simple_salesforce import Salesforce

# Configure logging AT THE TOP before usage
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# SECURITY(review): live Salesforce credentials are hard-coded in source.
# These should come from environment variables or a secrets manager, and
# the exposed password / security token must be rotated.
SALESFORCE_USERNAME = "vaneshdevarapalli866@agentforce.com"
SALESFORCE_PASSWORD = "vanesh@331"
SALESFORCE_SECURITY_TOKEN = "VRUVbBOdG0s9Q4xy0W6DB1Y6b"

def connect_to_salesforce():
    """Authenticate against Salesforce and return a live client.

    On failure the underlying exception is logged and re-raised, so the
    module fails fast at import time when credentials are bad.
    """
    try:
        client = Salesforce(
            username=SALESFORCE_USERNAME,
            password=SALESFORCE_PASSWORD,
            security_token=SALESFORCE_SECURITY_TOKEN,
            domain="login",  # production login endpoint (not a sandbox)
        )
        logger.info("Connected to Salesforce successfully.")
        return client
    except Exception as exc:
        logger.error(f"Salesforce connection failed: {exc}")
        raise

# Module-level Salesforce session established at import time; any auth
# failure aborts startup because connect_to_salesforce re-raises.
sf = connect_to_salesforce()

# FastAPI endpoint
# Local prediction service consumed by call_model below.
FASTAPI_ENDPOINT = "http://localhost:8000/predict"

# Load and validate dataset
def load_dataset(file_path="equipment_data.csv"):
    """Read the equipment CSV and normalise it for the dashboard.

    Validates that the required columns exist (otherwise returns None),
    coerces the numeric columns (unparseable values -> NaN -> 0), and
    creates any missing optional columns with a 0.0 default so downstream
    formatting never hits a KeyError.  Returns the cleaned DataFrame, or
    None on any load/validation failure.
    """
    required = ["Equipment_ID__c", "Equipment_Type__c", "Usage_Hours__c", "Idle_Hours__c"]
    optional = ["Movement_Frequency__c", "Cost_Per_Hour__c"]
    text_cols = {"Equipment_ID__c", "Equipment_Type__c"}

    try:
        data = pd.read_csv(file_path)
    except FileNotFoundError:
        logger.error(f"Dataset file {file_path} not found")
        return None
    except Exception as e:
        logger.error(f"Failed to load dataset: {e}")
        return None

    try:
        missing = [c for c in required if c not in data.columns]
        if missing:
            logger.error(f"Missing required columns: {missing}")
            return None

        # Coerce every known non-text column that is present to numeric.
        numeric = [c for c in required + optional
                   if c in data.columns and c not in text_cols]
        for c in numeric:
            data[c] = pd.to_numeric(data[c], errors='coerce')

        # Replace coercion failures / blanks with zero.
        if data[numeric].isnull().any().any():
            logger.warning("NaN values detected in numeric columns, filling with 0")
            data[numeric] = data[numeric].fillna(0)

        # Guarantee the optional columns exist for the display/export code.
        for c in optional:
            if c not in data.columns:
                logger.warning(f"Optional column {c} missing, adding with default value 0")
                data[c] = 0.0

        return data
    except Exception as e:
        logger.error(f"Failed to load dataset: {e}")
        return None

# Load dataset
df = load_dataset()
# Dropdown choices: distinct equipment types from the data, or a single
# sentinel entry when the dataset failed to load or is empty.
equipment_types = sorted(df["Equipment_Type__c"].dropna().unique().tolist()) if df is not None and not df.empty else ["No Equipment Types"]
# Closed set of suggestions the model may return; anything outside this
# list is coerced to "Replace" in call_model.
suggestion_types = ["Move", "Pause Rent", "Repair", "Replace"]

# Call FastAPI endpoint
def call_model(row):
    """POST one equipment row to the prediction service.

    Returns a (suggestion, confidence) pair.  Any transport or parsing
    failure, an unrecognised suggestion label, or an out-of-range
    confidence falls back to the conservative default ("Replace", 0.5).
    """
    try:
        payload = {
            "usage_hours": float(row["Usage_Hours__c"]),
            "idle_hours": float(row["Idle_Hours__c"]),
            "movement_frequency": float(row.get("Movement_Frequency__c", 0.0)),
            "cost_per_hour": float(row.get("Cost_Per_Hour__c", 0.0)),
        }
        logger.info(f"Sending request with inputs: {payload}")

        resp = requests.post(FASTAPI_ENDPOINT, json=payload, timeout=10)
        resp.raise_for_status()
        body = resp.json()
        logger.info(f"Received response: {body}")

        suggestion = body.get("suggestion", "Replace")
        confidence = float(body.get("confidence", 0.5))

        # Guard against labels outside the known set.
        if suggestion not in suggestion_types:
            logger.warning(f"Invalid suggestion: {suggestion}, defaulting to Replace")
            suggestion = "Replace"

        # Guard against confidences outside [0, 1].
        if not 0 <= confidence <= 1:
            logger.warning(f"Invalid confidence: {confidence}, defaulting to 0.5")
            confidence = 0.5

        return suggestion, confidence
    except (requests.RequestException, ValueError, KeyError) as e:
        logger.error(f"API call failed: {e}")
        return "Replace", 0.5

# Filter equipment and generate display
def filter_equipment(equipment_type, suggestion):
    """Build the dashboard text panes for one (type, suggestion) filter.

    Runs the model on every row of the selected equipment type, keeps the
    rows whose AI suggestion matches, and returns a pair of strings:
    (detail cards joined by blank lines, per-ID confidence lines).
    """
    if not equipment_type or not suggestion or df is None or df.empty:
        return "No data available or invalid filters selected.", ""

    try:
        subset = df[df["Equipment_Type__c"].str.lower() == equipment_type.lower()].copy()
        if subset.empty:
            return f"No equipment found for type: {equipment_type}.", ""

        # Score every candidate row via the prediction service.
        subset["AI_Suggestion__c"] = None
        subset["Suggestion_Confidence__c"] = 0.0
        for idx, record in subset.iterrows():
            label, score = call_model(record)
            subset.at[idx, "AI_Suggestion__c"] = label
            subset.at[idx, "Suggestion_Confidence__c"] = score

        # Keep only the rows whose predicted action matches the filter.
        subset = subset[subset["AI_Suggestion__c"].str.lower() == suggestion.lower()]
        if subset.empty:
            return f"No equipment with suggestion '{suggestion}' for type '{equipment_type}'.", ""

        cards = []
        confidences = []
        for _, record in subset.iterrows():
            cards.append(
                f"ID: {record['Equipment_ID__c']} | Usage: {record['Usage_Hours__c']:.2f} hrs | "
                f"Idle: {record['Idle_Hours__c']:.2f} hrs | Move Freq: {record['Movement_Frequency__c']:.2f} | "
                f"Cost/hr: ${record['Cost_Per_Hour__c']:.2f} | AI: {record['AI_Suggestion__c']} "
                f"({record['Suggestion_Confidence__c']:.2%})"
            )
            confidences.append(
                f"{record['Equipment_ID__c']}: {record['Suggestion_Confidence__c']:.2%}"
            )
        return "\n\n".join(cards), "\n".join(confidences)
    except Exception as e:
        logger.error(f"Error in filter_equipment: {e}")
        return "An error occurred while filtering equipment.", ""

# Export filtered data to CSV
def export_csv(equipment_type, suggestion):
    """Write the currently filtered rows to a uniquely named CSV file.

    Re-runs the same prediction/filter pipeline as filter_equipment and
    returns the generated filename, or None when there is nothing to
    export or an error occurs.
    """
    if not equipment_type or not suggestion or df is None or df.empty:
        return None

    try:
        subset = df[df["Equipment_Type__c"].str.lower() == equipment_type.lower()].copy()
        if subset.empty:
            return None

        # Score rows exactly as the on-screen view does.
        subset["AI_Suggestion__c"] = None
        subset["Suggestion_Confidence__c"] = 0.0
        for idx, record in subset.iterrows():
            label, score = call_model(record)
            subset.at[idx, "AI_Suggestion__c"] = label
            subset.at[idx, "Suggestion_Confidence__c"] = score

        subset = subset[subset["AI_Suggestion__c"].str.lower() == suggestion.lower()]
        if subset.empty:
            return None

        # Random suffix keeps repeated exports from clobbering each other.
        out_name = f"filtered_equipment_{uuid4().hex[:8]}.csv"
        subset.to_csv(out_name, index=False)
        return out_name
    except Exception as e:
        logger.error(f"Error in export_csv: {e}")
        return None

# Build Gradio UI
# Declarative layout: two dropdown filters drive two read-only text panes;
# a button re-runs the same filter and offers the result as a CSV download.
with gr.Blocks(theme=gr.themes.Soft()) as app:
    gr.Markdown("# Equipment Utilization Dashboard")
    gr.Markdown("Filter equipment by type and AI suggestion to optimize utilization.")
    
    with gr.Row():
        etype = gr.Dropdown(choices=equipment_types, label="Equipment Type", 
                           value=equipment_types[0] if equipment_types else None)
        suggestion = gr.Dropdown(choices=suggestion_types, label="Suggestion Type", value=suggestion_types[0])
    
    results = gr.Textbox(label="Equipment Details", lines=10, placeholder="Select equipment type and suggestion...")
    confidences = gr.Textbox(label="Confidence Scores", lines=5, placeholder="Confidence scores will appear here...")
    
    with gr.Row():
        export_button = gr.Button("Export to CSV", variant="primary")
    
    file_output = gr.File(label="Download CSV")
    
    # Define interactions
    # Changing either dropdown refreshes both text panes; the export button
    # regenerates the filtered CSV and binds it to the download widget.
    etype.change(fn=filter_equipment, inputs=[etype, suggestion], outputs=[results, confidences])
    suggestion.change(fn=filter_equipment, inputs=[etype, suggestion], outputs=[results, confidences])
    export_button.click(
        fn=export_csv, 
        inputs=[etype, suggestion], 
        outputs=file_output
    )

# Launch the app
if __name__ == "__main__":
    try:
        # share=False keeps the app local-only; debug=False suppresses
        # Gradio's verbose error traces in the UI.
        app.launch(debug=False, share=False)
    except Exception as e:
        logger.error(f"Failed to launch Gradio app: {e}")