Spaces: Sleeping
| from fastapi import FastAPI, File, UploadFile, HTTPException | |
| from fastapi.middleware.cors import CORSMiddleware | |
| from fastapi.responses import FileResponse, HTMLResponse, JSONResponse | |
| from fastapi.staticfiles import StaticFiles | |
| import tempfile | |
| import os | |
| import base64 | |
| from typing import Dict, Any, Optional | |
| import shutil | |
| import json | |
| import numpy as np | |
| import google.generativeai as genai | |
| from dotenv import load_dotenv | |
| from lib import get_all_model_predictions, create_confidence_chart, create_voting_chart | |
# Load environment variables from a local .env file (if present).
load_dotenv()

GEMINI_API_KEY = os.getenv('GEMINI_API_KEY')
if not GEMINI_API_KEY:
    raise ValueError("GEMINI_API_KEY environment variable is not set")

# Configure the Gemini client with the API key.
genai.configure(api_key=GEMINI_API_KEY)

# JSON schema the model must follow when describing an engine issue.
_DIAGNOSIS_SCHEMA = {
    'type': 'object',
    'properties': {
        'issue_description': {'type': 'string'},
        'possible_causes': {'type': 'array', 'items': {'type': 'string'}},
        'possible_fixes': {
            'type': 'array',
            'items': {
                'type': 'object',
                'properties': {
                    'fix': {'type': 'string'},
                    'complexity': {'type': 'string'},
                    'tools_required': {'type': 'array', 'items': {'type': 'string'}},
                    'estimated_time': {'type': 'string'},
                },
            },
        },
        'severity': {'type': 'string'},
        'risk_assessment': {'type': 'string'},
        'estimated_cost': {
            'type': 'object',
            'properties': {
                'parts': {'type': 'string'},
                'labor': {'type': 'string'},
                'total': {'type': 'string'},
            },
        },
        'diy_possible': {'type': 'boolean'},
        'professional_help_recommended': {'type': 'boolean'},
        'preventive_measures': {'type': 'array', 'items': {'type': 'string'}},
        'warning_signs': {'type': 'array', 'items': {'type': 'string'}},
        'additional_diagnostics_needed': {'type': 'array', 'items': {'type': 'string'}},
    },
    'required': [
        'issue_description',
        'possible_causes',
        'possible_fixes',
        'severity',
        'risk_assessment',
        'estimated_cost',
        'diy_possible',
        'professional_help_recommended',
        'preventive_measures',
        'warning_signs',
        'additional_diagnostics_needed',
    ],
}

# Generative model configured to emit structured JSON diagnostics.
model = genai.GenerativeModel(
    'gemini-2.0-flash',
    generation_config={
        'temperature': 0.3,         # Lower temperature for more focused responses
        'top_p': 0.8,
        'top_k': 40,
        'max_output_tokens': 2048,  # Ensure we get detailed responses
        'response_mime_type': 'application/json',
        'response_schema': _DIAGNOSIS_SCHEMA,
    },
)
def convert_numpy_types(obj):
    """Recursively convert numpy types to JSON-serializable Python natives.

    Args:
        obj: Any value; dicts/lists/tuples are walked recursively.

    Returns:
        The same structure with numpy scalars converted to Python natives,
        ndarrays converted to (nested) lists, and tuples converted to lists.
        Non-numpy values are returned unchanged.
    """
    if isinstance(obj, np.ndarray):
        # tolist() already yields native Python scalars element-wise.
        return obj.tolist()
    if isinstance(obj, np.generic):
        # np.generic covers ALL numpy scalars (integer, floating, bool_, ...).
        # The original int/float checks let np.bool_ slip through unconverted,
        # which is not JSON-serializable; .item() handles every scalar kind.
        return obj.item()
    if isinstance(obj, dict):
        return {key: convert_numpy_types(value) for key, value in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [convert_numpy_types(item) for item in obj]
    return obj
async def get_issue_fixes(issue: str) -> dict:
    """Ask Gemini for repair recommendations for a detected engine issue.

    Args:
        issue: Label of the engine problem detected by the sound classifier.

    Returns:
        A dict matching the structured-diagnosis schema.  If the API call or
        JSON parsing fails, a conservative "see a professional" fallback
        payload with the same shape is returned instead of raising.
    """
    prompt = f"""As an expert automotive diagnostic technician, analyze and provide detailed recommendations for the following engine issue detected through sound analysis:
Issue Detected: {issue}
Consider the following aspects in your analysis:
1. Common causes of this specific sound/issue
2. Potential mechanical components involved
3. Risk of continued operation
4. Immediate actions needed
5. Long-term preventive measures
6. Required tools and expertise level
7. Estimated repair time
8. Parts that might need replacement
Format your response as a detailed JSON with the following structure:
{{
    "issue_description": "Brief technical description of the issue",
    "possible_causes": ["cause1", "cause2", ...],
    "possible_fixes": [
        {{
            "fix": "detailed fix description",
            "complexity": "easy|medium|hard",
            "tools_required": ["tool1", "tool2", ...],
            "estimated_time": "time in hours"
        }},
        ...
    ],
    "severity": "low|medium|high",
    "risk_assessment": "Detailed risk of continued operation",
    "estimated_cost": {{
        "parts": "cost range in INR",
        "labor": "cost range in INR",
        "total": "total cost range in INR"
    }},
    "diy_possible": true|false,
    "professional_help_recommended": true|false,
    "preventive_measures": ["measure1", "measure2", ...],
    "warning_signs": ["sign1", "sign2", ...],
    "additional_diagnostics_needed": ["test1", "test2", ...]
}}
Ensure the response is technically accurate and prioritizes safety."""
    try:
        response = await model.generate_content_async(prompt)
        return json.loads(response.text)
    except Exception as exc:
        # Best-effort endpoint: a Gemini outage or malformed reply must not
        # break the caller, so log and fall back to a safe generic answer.
        print(f"Error getting issue fixes: {str(exc)}")
        return {
            "issue_description": "Unable to analyze the issue in detail",
            "possible_causes": ["Multiple factors could be involved"],
            "possible_fixes": [
                {
                    "fix": "Please consult a professional mechanic for proper diagnosis",
                    "complexity": "unknown",
                    "tools_required": ["Professional diagnostic equipment"],
                    "estimated_time": "Varies"
                }
            ],
            "severity": "unknown",
            "risk_assessment": "Cannot assess risk without professional inspection",
            "estimated_cost": {
                "parts": "Unknown - requires inspection",
                "labor": "Unknown - requires inspection",
                "total": "Unknown - requires professional diagnosis"
            },
            "diy_possible": False,
            "professional_help_recommended": True,
            "preventive_measures": ["Regular maintenance", "Professional inspection"],
            "warning_signs": ["Unusual engine sounds", "Changes in performance"],
            "additional_diagnostics_needed": ["Professional diagnostic scan", "Physical inspection"]
        }
# FastAPI application instance and cross-origin configuration.
app = FastAPI(title="Engine Sound Classifier")

# Allow any origin/method/header so a browser front-end can call the API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Serve assets (e.g. generated chart images) from ./static, creating it
# on first start so the mount does not fail.
os.makedirs("static", exist_ok=True)
app.mount("/static", StaticFiles(directory="static"), name="static")
async def root():
    """Serve the front-end page (index.html from the working directory).

    NOTE(review): no route decorator is visible here — presumably
    ``@app.get("/")`` was lost when this file was extracted; confirm
    against the original source.
    """
    with open("index.html", "r") as html_file:
        return html_file.read()
async def predict_audio(file: UploadFile = File(...)):
    """Classify an uploaded engine-sound recording and return predictions.

    Saves the upload to a temporary WAV file, runs every available model on
    it, renders confidence/voting charts (returned as base64-encoded PNG
    strings), and — when the winning prediction is not the normal-engine
    class — asks Gemini for repair recommendations.

    Args:
        file: Uploaded audio file; its bytes are written out as-is.

    Returns:
        dict with per-model results, the best model/prediction pair, its
        confidence, optional chart images, and optional fix recommendations.
    """
    # Persist the upload so the predictors can work from a file path.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as tmp:
        shutil.copyfileobj(file.file, tmp)
        tmp_path = tmp.name

    # BUG FIX: pre-initialize the chart paths.  If get_all_model_predictions
    # (or anything before chart creation) raises, the finally block below
    # references these names; previously they were unbound, so the original
    # exception was masked by a NameError.
    confidence_chart_path = None
    voting_chart_path = None
    try:
        # Get predictions from all models.
        results, best_model, best_prediction, highest_confidence = get_all_model_predictions(tmp_path)

        # Convert numpy types so the response is JSON-serializable.
        results = convert_numpy_types(results)
        highest_confidence = float(highest_confidence) if isinstance(highest_confidence, np.floating) else highest_confidence

        # Create visualization charts.
        confidence_chart_path = create_confidence_chart(results, best_model)
        voting_chart_path = create_voting_chart(results)

        # Read and base64-encode the charts if they were produced.
        confidence_chart_data = None
        voting_chart_data = None
        if confidence_chart_path and os.path.exists(confidence_chart_path):
            with open(confidence_chart_path, "rb") as img_file:
                confidence_chart_data = base64.b64encode(img_file.read()).decode('utf-8')
        if voting_chart_path and os.path.exists(voting_chart_path):
            with open(voting_chart_path, "rb") as img_file:
                voting_chart_data = base64.b64encode(img_file.read()).decode('utf-8')

        # Only ask Gemini for fixes when an actual issue was detected.
        fixes = None
        print(best_prediction)  # debug trace of the winning class
        if best_prediction != "Normal Engine Sound (No Issues)":
            fixes = await get_issue_fixes(best_prediction)

        return {
            "predictions": results,
            "best_model": best_model,
            "best_prediction": best_prediction,
            "confidence": highest_confidence,
            "confidence_chart": confidence_chart_data,
            "voting_chart": voting_chart_data,
            "fixes": fixes
        }
    finally:
        # Clean up the temp audio and any chart files that were generated.
        os.unlink(tmp_path)
        for chart_path in [confidence_chart_path, voting_chart_path]:
            if chart_path and os.path.exists(chart_path):
                os.unlink(chart_path)
async def get_service_centers(lat: float, lon: float, radius: Optional[int] = 5000):
    """Return automobile service centers near the given coordinates.

    Placeholder implementation: a real version would query a mapping
    service (Google Maps, etc.); for now one mock entry is returned.
    The ``radius`` parameter (meters) is accepted but currently unused.

    Raises:
        HTTPException: 500 with the error detail if lookup fails.
    """
    try:
        mock_center = {
            "name": "Example Auto Service",
            "address": "123 Main St",
            "rating": 4.5,
            "distance": "1.2 km",
            "phone": "+1-234-567-8900"
        }
        return {"service_centers": [mock_center]}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
async def get_models():
    """List the display names of trained model files present under ./models."""
    models_dir = 'models'
    # Map of expected joblib filenames to human-readable display names.
    model_names = {
        'lr_model.joblib': 'Logistic Regression',
        'nn_model.joblib': 'Neural Network',
        'rf_model.joblib': 'Random Forest',
        'svm_model.joblib': 'Support Vector Machine',
        'xgb_model.joblib': 'XGBoost'
    }
    available_models = []
    if os.path.exists(models_dir):
        # Preserve os.listdir order; unknown filenames are ignored.
        for filename in os.listdir(models_dir):
            if filename.endswith('_model.joblib') and filename in model_names:
                available_models.append(model_names[filename])
    return {"models": available_models}
if __name__ == "__main__":
    # Launch the API with uvicorn when this file is executed directly.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)