Spaces:
Paused
Paused
Ali Mohsin
committed on
Commit
·
dce7a6a
1
Parent(s):
fb406bf
first commit 2
Browse files- app.py +92 -0
- requirements.txt +2 -0
app.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from huggingface_hub import InferenceClient
|
| 3 |
+
import json
|
| 4 |
+
import random
|
| 5 |
+
import os
|
| 6 |
+
|
# Inference client for the hosted Zephyr-7B model.  The token is resolved
# explicitly from the environment: HF_TOKEN first, then the legacy
# HUGGING_FACE_HUB_TOKEN name (which the original comment referenced but
# never actually read).  When neither is set the client falls back to
# anonymous access, which may be rate-limited.
client = InferenceClient(
    "HuggingFaceH4/zephyr-7b-beta",
    token=os.getenv("HF_TOKEN") or os.getenv("HUGGING_FACE_HUB_TOKEN"),
)
| 9 |
+
|
# System prompt sent on every request: pins the assistant persona and the
# exact JSON schema the model must emit.  generate_solution() prepends this
# (plus FEW_SHOT_EXAMPLES) to the user's description.
SYSTEM_PROMPT = """You are ModelForge, an expert AI architecture assistant. Your goal is to analyze machine learning problems and generate detailed, deployable solutions in strict JSON format.

You must analyze the user's request and return a JSON object with the following structure:
{
"analysis": {
"dataType": "image" | "text" | "tabular" | "audio" | "video" | "time_series" | "multimodal",
"taskType": "classification" | "regression" | "nlp" | "vision" | "forecasting" | "multimodal_reasoning",
"complexity": "low" | "medium" | "high" | "research",
"domain": "string (e.g., medical, finance, etc.)"
},
"recommendations": [
{
"name": "Model Name",
"description": "Detailed technical description...",
"pros": ["pro1", "pro2", "pro3"],
"cons": ["con1", "con2"],
"architectureDiagram": "Mermaid graph definition...",
"mlopsBestPractices": ["step 1", "step 2", ...],
"trainingCode": "Python code snippet..."
}
]
}

Provide 2-3 distinct recommendations. For research-level problems, propose novel architectures.
Ensure the Mermaid diagram uses valid syntax (no curly braces for nodes, use square brackets []).
"""
| 36 |
+
|
| 37 |
+
FEW_SHOT_EXAMPLES = """
|
| 38 |
+
Example Input: "Detect fraud in credit card transactions"
|
| 39 |
+
Example Output:
|
| 40 |
+
{
|
| 41 |
+
"analysis": { "dataType": "tabular", "taskType": "classification", "complexity": "medium", "domain": "finance" },
|
| 42 |
+
"recommendations": [
|
| 43 |
+
{
|
| 44 |
+
"name": "XGBoost Fraud Detector",
|
| 45 |
+
"description": "Gradient boosting ensemble optimized for imbalanced tabular data...",
|
| 46 |
+
"pros": ["High interpretability", "Handles missing data"],
|
| 47 |
+
"cons": ["Feature engineering required"],
|
| 48 |
+
"architectureDiagram": "graph TD\\nA[Raw Data] --> B[Preprocessing]\\nB --> C[XGBoost]",
|
| 49 |
+
"mlopsBestPractices": ["Use DVC for data", "Monitor drift"],
|
| 50 |
+
"trainingCode": "import xgboost as xgb..."
|
| 51 |
+
}
|
| 52 |
+
]
|
| 53 |
+
}
|
| 54 |
+
"""
|
| 55 |
+
|
def generate_solution(description):
    """Generate ML architecture recommendations for a problem description.

    Builds a prompt from the module-level SYSTEM_PROMPT and
    FEW_SHOT_EXAMPLES, queries the hosted model via ``client``, and
    returns a JSON string with the analysis/recommendations schema.

    On any failure — network/model error, or model output that is not
    valid JSON — a fallback JSON string with an ``"error"`` key and an
    empty recommendation list is returned, so the Gradio UI always
    receives parseable JSON.  The original code returned the model text
    unvalidated; here the extracted text must survive ``json.loads``
    before it is returned.

    Args:
        description: Free-text description of the user's ML problem.

    Returns:
        A JSON-formatted string.  This function never raises.
    """
    prompt = f"{SYSTEM_PROMPT}\n\n{FEW_SHOT_EXAMPLES}\n\nUser Input: \"{description}\"\n\nJSON Response:"

    try:
        response = client.text_generation(
            prompt,
            max_new_tokens=2048,
            temperature=0.7,
            top_p=0.95,
            return_full_text=False,
        )

        json_str = _extract_json(response)
        # Validate before returning: LLM output is not guaranteed to be
        # well-formed JSON.  A parse failure falls through to the fallback.
        json.loads(json_str)
        return json_str
    except Exception as e:  # UI boundary: never crash the app, always return JSON
        return json.dumps({
            "error": str(e),
            "analysis": {"dataType": "text", "taskType": "nlp", "complexity": "low", "domain": "general"},
            "recommendations": []
        })


def _extract_json(text):
    """Strip Markdown code fences and surrounding chatter from *text*.

    After removing a leading ```json / ``` fence (as the original did),
    also narrow to the span from the first '{' to the last '}' — the
    model sometimes wraps the JSON in prose.  Returns the cleaned text
    unchanged when no brace pair is found.
    """
    json_str = text.strip()
    if json_str.startswith("```json"):
        json_str = json_str.split("```json")[1].split("```")[0].strip()
    elif json_str.startswith("```"):
        json_str = json_str.split("```")[1].split("```")[0].strip()
    start = json_str.find("{")
    end = json_str.rfind("}")
    if start != -1 and end > start:
        return json_str[start:end + 1]
    return json_str
| 82 |
+
|
# Gradio UI: one free-text textbox in, a JSON viewer out, backed by
# generate_solution (which always returns a JSON string).
demo = gr.Interface(
    fn=generate_solution,
    inputs=gr.Textbox(lines=5, placeholder="Describe your ML problem..."),
    outputs=gr.JSON(label="Recommendations"),
    title="ModelForge AI Backend",
    description="Generates ML recommendations via LLM."
)

# Launch only when executed as a script (Spaces runs app.py directly).
if __name__ == "__main__":
    demo.launch()
requirements.txt
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio
|
| 2 |
+
huggingface_hub
|