import logging
import os
from uuid import uuid4

import gradio as gr
import pandas as pd
import requests
from simple_salesforce import Salesforce
|
|
| |
# Application-wide logging configuration.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


# SECURITY(review): live Salesforce credentials were hard-coded here.  They are
# now read from the environment first; the in-source literals remain only as a
# backward-compatible fallback and MUST be rotated and deleted from history.
SALESFORCE_USERNAME = os.getenv("SALESFORCE_USERNAME", "vaneshdevarapalli866@agentforce.com")
SALESFORCE_PASSWORD = os.getenv("SALESFORCE_PASSWORD", "vanesh@331")
SALESFORCE_SECURITY_TOKEN = os.getenv("SALESFORCE_SECURITY_TOKEN", "VRUVbBOdG0s9Q4xy0W6DB1Y6b")
|
|
def connect_to_salesforce():
    """Authenticate against Salesforce with the module-level credentials.

    Returns:
        An authenticated simple_salesforce.Salesforce client.

    Raises:
        Exception: re-raises whatever the Salesforce client raised, after
        logging it, so the caller decides how to handle the failure.
    """
    credentials = {
        "username": SALESFORCE_USERNAME,
        "password": SALESFORCE_PASSWORD,
        "security_token": SALESFORCE_SECURITY_TOKEN,
        "domain": "login",
    }
    try:
        client = Salesforce(**credentials)
        logger.info("Connected to Salesforce successfully.")
        return client
    except Exception as exc:
        logger.error(f"Salesforce connection failed: {exc}")
        raise
|
|
# Connect to Salesforce at startup.  A connection failure is no longer fatal:
# the dashboard features below only need the local dataset and the FastAPI
# model endpoint, so we keep running with sf = None instead of crashing the
# whole app at import time when Salesforce is unreachable.
try:
    sf = connect_to_salesforce()
except Exception:
    sf = None


# Prediction endpoint of the local FastAPI model server; overridable via the
# FASTAPI_ENDPOINT environment variable for non-local deployments.
FASTAPI_ENDPOINT = os.getenv("FASTAPI_ENDPOINT", "http://localhost:8000/predict")
|
|
| |
def load_dataset(file_path="equipment_data.csv"):
    """Load the equipment CSV and normalize its columns.

    Required columns must all be present; optional numeric columns are
    created with a 0.0 default when absent.  Numeric columns are coerced
    with NaNs replaced by 0.

    Args:
        file_path: Path to the equipment CSV file.

    Returns:
        The cleaned DataFrame, or None when the file is missing, unreadable,
        or lacks a required column.
    """
    required = ["Equipment_ID__c", "Equipment_Type__c", "Usage_Hours__c", "Idle_Hours__c"]
    optional = ["Movement_Frequency__c", "Cost_Per_Hour__c"]
    text_cols = {"Equipment_ID__c", "Equipment_Type__c"}
    try:
        data = pd.read_csv(file_path)

        missing = [name for name in required if name not in data.columns]
        if missing:
            logger.error(f"Missing required columns: {missing}")
            return None

        # Coerce every present non-identifier column to numeric values.
        numeric_present = [
            name for name in required + optional
            if name in data.columns and name not in text_cols
        ]
        for name in numeric_present:
            data[name] = pd.to_numeric(data[name], errors='coerce')

        if data[numeric_present].isnull().any().any():
            logger.warning("NaN values detected in numeric columns, filling with 0")
            data[numeric_present] = data[numeric_present].fillna(0)

        # Backfill any optional columns the file did not provide.
        for name in optional:
            if name not in data.columns:
                logger.warning(f"Optional column {name} missing, adding with default value 0")
                data[name] = 0.0

        return data
    except FileNotFoundError:
        logger.error(f"Dataset file {file_path} not found")
        return None
    except Exception as e:
        logger.error(f"Failed to load dataset: {e}")
        return None
|
|
| |
df = load_dataset()

# Dropdown choices: distinct equipment types from the data, or a placeholder
# entry when the dataset failed to load or is empty.
if df is not None and not df.empty:
    equipment_types = sorted(df["Equipment_Type__c"].dropna().unique().tolist())
else:
    equipment_types = ["No Equipment Types"]
suggestion_types = ["Move", "Pause Rent", "Repair", "Replace"]
|
|
| |
def call_model(row):
    """Request an AI suggestion for one equipment row from the FastAPI model.

    Args:
        row: Mapping-like record (pandas Series or dict) with Usage_Hours__c
            and Idle_Hours__c, and optionally Movement_Frequency__c /
            Cost_Per_Hour__c.

    Returns:
        A (suggestion, confidence) tuple.  Falls back to ("Replace", 0.5) on
        any request, parsing, or validation failure so the dashboard keeps
        working when the model service is down or misbehaving.
    """
    try:
        # Optional fields default to 0.0 so rows from minimal CSVs still work.
        inputs = {
            "usage_hours": float(row["Usage_Hours__c"]),
            "idle_hours": float(row["Idle_Hours__c"]),
            "movement_frequency": float(row.get("Movement_Frequency__c", 0.0)),
            "cost_per_hour": float(row.get("Cost_Per_Hour__c", 0.0))
        }

        logger.info(f"Sending request with inputs: {inputs}")

        response = requests.post(FASTAPI_ENDPOINT, json=inputs, timeout=10)
        response.raise_for_status()
        result = response.json()

        logger.info(f"Received response: {result}")

        suggestion = result.get("suggestion", "Replace")
        confidence = float(result.get("confidence", 0.5))

        # Guard against out-of-vocabulary or out-of-range model output.
        if suggestion not in suggestion_types:
            logger.warning(f"Invalid suggestion: {suggestion}, defaulting to Replace")
            suggestion = "Replace"

        if not (0 <= confidence <= 1):
            logger.warning(f"Invalid confidence: {confidence}, defaulting to 0.5")
            confidence = 0.5

        return suggestion, confidence
    except (requests.RequestException, ValueError, KeyError, TypeError) as e:
        # TypeError covers float(None) / float(dict) when the API returns a
        # null or non-numeric confidence, which previously crashed uncaught.
        logger.error(f"API call failed: {e}")
        return "Replace", 0.5
|
|
| |
def filter_equipment(equipment_type, suggestion):
    """Build the dashboard text views for the selected filters.

    Scores every row of the chosen equipment type with the AI model, keeps
    only rows whose suggestion matches, and formats them as text cards.

    Returns:
        A (details_text, confidence_text) tuple; the first element carries a
        human-readable message when there is nothing to show.
    """
    if not equipment_type or not suggestion or df is None or df.empty:
        return "No data available or invalid filters selected.", ""

    try:
        type_mask = df["Equipment_Type__c"].str.lower() == equipment_type.lower()
        subset = df[type_mask].copy()
        if subset.empty:
            return f"No equipment found for type: {equipment_type}.", ""

        # Annotate every row with the model's verdict before filtering.
        subset["AI_Suggestion__c"] = None
        subset["Suggestion_Confidence__c"] = 0.0
        for row_idx, record in subset.iterrows():
            predicted, score = call_model(record)
            subset.at[row_idx, "AI_Suggestion__c"] = predicted
            subset.at[row_idx, "Suggestion_Confidence__c"] = score

        subset = subset[subset["AI_Suggestion__c"].str.lower() == suggestion.lower()]
        if subset.empty:
            return f"No equipment with suggestion '{suggestion}' for type '{equipment_type}'.", ""

        cards = []
        confidence_lines = []
        for _, record in subset.iterrows():
            cards.append(
                f"ID: {record['Equipment_ID__c']} | Usage: {record['Usage_Hours__c']:.2f} hrs | "
                f"Idle: {record['Idle_Hours__c']:.2f} hrs | Move Freq: {record['Movement_Frequency__c']:.2f} | "
                f"Cost/hr: ${record['Cost_Per_Hour__c']:.2f} | AI: {record['AI_Suggestion__c']} "
                f"({record['Suggestion_Confidence__c']:.2%})"
            )
            confidence_lines.append(
                f"{record['Equipment_ID__c']}: {record['Suggestion_Confidence__c']:.2%}"
            )
        return "\n\n".join(cards), "\n".join(confidence_lines)
    except Exception as e:
        logger.error(f"Error in filter_equipment: {e}")
        return "An error occurred while filtering equipment.", ""
|
|
| |
def export_csv(equipment_type, suggestion):
    """Export the filtered, model-scored equipment rows to a CSV file.

    Mirrors filter_equipment's filtering: select rows of the given type,
    score each with the AI model, keep only matching suggestions.

    Returns:
        The generated filename (unique via a uuid suffix) for Gradio's File
        component, or None when there is nothing to export or on error.
    """
    if not equipment_type or not suggestion or df is None or df.empty:
        return None

    try:
        subset = df.loc[df["Equipment_Type__c"].str.lower() == equipment_type.lower()].copy()
        if subset.empty:
            return None

        # Annotate every row with the model's verdict before filtering.
        subset["AI_Suggestion__c"] = None
        subset["Suggestion_Confidence__c"] = 0.0
        for row_idx, record in subset.iterrows():
            predicted, score = call_model(record)
            subset.at[row_idx, "AI_Suggestion__c"] = predicted
            subset.at[row_idx, "Suggestion_Confidence__c"] = score

        subset = subset.loc[subset["AI_Suggestion__c"].str.lower() == suggestion.lower()]
        if subset.empty:
            return None

        # Unique name avoids clobbering earlier exports in the working dir.
        out_name = f"filtered_equipment_{uuid4().hex[:8]}.csv"
        subset.to_csv(out_name, index=False)
        return out_name
    except Exception as e:
        logger.error(f"Error in export_csv: {e}")
        return None
|
|
| |
# ---------------------------------------------------------------------------
# Gradio UI: two filter dropdowns, two read-only text panes, and CSV export.
# Statement order here is the on-screen layout order — do not reorder.
# ---------------------------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft()) as app:
    gr.Markdown("# Equipment Utilization Dashboard")
    gr.Markdown("Filter equipment by type and AI suggestion to optimize utilization.")

    with gr.Row():
        # Defaults to the first available type/suggestion so the page is
        # populated as soon as a change event fires.
        etype = gr.Dropdown(choices=equipment_types, label="Equipment Type",
                            value=equipment_types[0] if equipment_types else None)
        suggestion = gr.Dropdown(choices=suggestion_types, label="Suggestion Type", value=suggestion_types[0])

    results = gr.Textbox(label="Equipment Details", lines=10, placeholder="Select equipment type and suggestion...")
    confidences = gr.Textbox(label="Confidence Scores", lines=5, placeholder="Confidence scores will appear here...")

    with gr.Row():
        export_button = gr.Button("Export to CSV", variant="primary")

    file_output = gr.File(label="Download CSV")

    # Re-run the filter whenever either dropdown changes; export on click.
    # NOTE(review): each event re-scores every row via the model endpoint.
    etype.change(fn=filter_equipment, inputs=[etype, suggestion], outputs=[results, confidences])
    suggestion.change(fn=filter_equipment, inputs=[etype, suggestion], outputs=[results, confidences])
    export_button.click(
        fn=export_csv,
        inputs=[etype, suggestion],
        outputs=file_output
    )
|
|
| |
if __name__ == "__main__":
    # Launch the dashboard locally; startup failures are logged, not raised.
    launch_options = {"debug": False, "share": False}
    try:
        app.launch(**launch_options)
    except Exception as e:
        logger.error(f"Failed to launch Gradio app: {e}")
|
|