# Final_1 / app.py — Gradio Space: distilgpt2 chat + California-housing ML demo
# (Hugging Face Hub commit ce0d582)
import os
import gradio as gr
import numpy as np
import torch
from transformers import pipeline
from sklearn.datasets import fetch_california_housing
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, accuracy_score
# ─── 1) LOAD A PUBLIC LLM ──────────────────────────────────────────────────────
# Hugging Face convention: device 0 = first GPU, -1 = CPU.
device = 0 if torch.cuda.is_available() else -1

# distilgpt2 is a small public GPT-2 distillation; sampling at T=0.7 keeps
# replies varied without drifting too far off-prompt.
_GEN_KWARGS = dict(
    model="distilgpt2",
    tokenizer="distilgpt2",
    device=device,
    max_new_tokens=100,
    do_sample=True,
    temperature=0.7,
)
generator = pipeline("text-generation", **_GEN_KWARGS)
def chat_response(prompt: str) -> str:
    """Return a free-form LLM completion for *prompt*, stripped of edge whitespace."""
    outputs = generator(prompt)
    text = outputs[0]["generated_text"]
    return text.strip()
# ─── 2) PREPARE HOUSING ML MODELS ───────────────────────────────────────────────
data = fetch_california_housing()
X, y = data.data, data.target

# Linear regression on the raw median-house-value target; r2 is the held-out score.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)
reg = LinearRegression().fit(X_train, y_train)
r2 = r2_score(y_test, reg.predict(X_test))

# Logistic classification: is a district's value above the overall median?
y_bin = (y > np.median(y)).astype(int)
Xb_train, Xb_test, yb_train, yb_test = train_test_split(
    X, y_bin, test_size=0.2, random_state=42
)
clf = LogisticRegression(max_iter=1000).fit(Xb_train, yb_train)
acc = accuracy_score(yb_test, clf.predict(Xb_test))
# ─── 3) ORCHESTRATOR / STATE MACHINE ─────────────────────────────────────────
def respond(message, chat_history, state):
    """Handle one chat turn of the clarify → ML → free-chat state machine.

    Args:
        message: the user's input text.
        chat_history: list of (user, bot) message pairs rendered by gr.Chatbot.
        state: dict with "stage" ("chat" | "clarify" | "ml") and "ml_type"
            ("regression" | "classification" | None).

    Returns:
        (chat_history, state), both mutated in place, matching the Gradio
        outputs [chat, state].
    """
    stage = state.get("stage", "chat")
    lowered = message.lower()

    # Clarify step: the user was just asked which ML task to run.
    if stage == "clarify":
        if "linear" in lowered:
            state.update(stage="ml", ml_type="regression")
            # Pair the user's message with the bot reply so the chat shows
            # both sides of the turn (the old code put a literal "Bot" string
            # in the user slot, hiding the user's message entirely).
            chat_history.append((message, "Running linear regression…"))
        elif "class" in lowered:
            state.update(stage="ml", ml_type="classification")
            chat_history.append((message, "Running logistic classification…"))
        else:
            chat_history.append(
                (message, "⚠️ Say 'linear regression' or 'classification'.")
            )
            return chat_history, state
        # Fall through: run the chosen model in the same turn.

    # ML execution: report the pre-trained model's held-out metric.
    if state.get("stage") == "ml":
        if state.get("ml_type") == "regression":
            result = f"✅ R²={r2:.3f}\nCoefs={np.round(reg.coef_, 3).tolist()}"
        else:
            result = f"✅ Accuracy={acc:.3f}"
        # None on the user side: this is a follow-up bot message in the same
        # turn, so the user's text is not rendered twice.
        chat_history.append((None, result))
        state.update(stage="chat", ml_type=None)
        return chat_history, state

    # Intent detection: an ML-sounding request moves us to the clarify stage.
    if any(k in lowered for k in ("predict", "regression", "classification")):
        state["stage"] = "clarify"
        chat_history.append((message, "Sure—linear regression or classification?"))
        return chat_history, state

    # Fallback: free-form chat via the LLM.
    reply = chat_response(message)
    chat_history.append((message, reply))
    return chat_history, state
# ─── 4) GRADIO UI ───────────────────────────────────────────────────────────────
with gr.Blocks() as demo:
    gr.Markdown("## 🤖 LLM + Housing-ML Chatbot")
    chat = gr.Chatbot()
    user_in = gr.Textbox(placeholder="Type here…", show_label=False)
    # Per-session conversation state for respond()'s state machine.
    state = gr.State({"stage": "chat", "ml_type": None})
    # NOTE(review): the textbox is not cleared after submit; clearing it would
    # require adding user_in to the outputs and a third return value — confirm
    # whether that UX change is wanted before altering respond()'s signature.
    user_in.submit(respond, [user_in, chat, state], [chat, state])

# queue() creates the /api/predict endpoint that Spaces needs
demo = demo.queue()

if __name__ == "__main__":
    # Spaces injects PORT; default to Gradio's standard 7860 locally.
    port = int(os.environ.get("PORT", 7860))
    demo.launch(server_name="0.0.0.0", server_port=port)