| import os |
| import gradio as gr |
| import numpy as np |
| import torch |
| from transformers import pipeline |
| from sklearn.datasets import fetch_california_housing |
| from sklearn.linear_model import LinearRegression, LogisticRegression |
| from sklearn.model_selection import train_test_split |
| from sklearn.metrics import r2_score, accuracy_score |
|
|
| |
# transformers device convention: 0 = first CUDA GPU, -1 = CPU.
device = 0 if torch.cuda.is_available() else -1

# Shared distilgpt2 text-generation pipeline for free-form chat replies.
_GEN_CONFIG = dict(
    model="distilgpt2",
    tokenizer="distilgpt2",
    device=device,
    max_new_tokens=100,
    do_sample=True,
    temperature=0.7,
)
generator = pipeline("text-generation", **_GEN_CONFIG)
|
|
def chat_response(prompt: str) -> str:
    """Return distilgpt2's continuation of *prompt*, stripped of whitespace.

    ``return_full_text=False`` makes the pipeline return only the newly
    generated tokens; the original call returned prompt + continuation, so
    the user's own message was echoed back into the chat.
    """
    out = generator(prompt, return_full_text=False)
    return out[0]["generated_text"].strip()
|
|
| |
# California housing dataset: numeric feature matrix X and regression target y.
_housing = fetch_california_housing()
X = _housing.data
y = _housing.target
|
|
| |
# Hold out 20% of the rows, fit an ordinary least-squares baseline, and
# keep the held-out R² (reported to the user by the chat handler).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)
reg = LinearRegression().fit(X_train, y_train)
r2 = r2_score(y_test, reg.predict(X_test))
|
|
| |
# Turn the same features into a binary task: above/below the median target.
labels = (y > np.median(y)).astype(int)
Xc_train, Xc_test, yc_train, yc_test = train_test_split(
    X, labels, test_size=0.2, random_state=42
)
clf = LogisticRegression(max_iter=1000).fit(Xc_train, yc_train)
acc = accuracy_score(yc_test, clf.predict(Xc_test))
|
|
| |
def respond(message, chat_history, state):
    """Route one chat turn through the three-stage dialogue.

    Args:
        message: the user's text for this turn.
        chat_history: list of ``(user, bot)`` message pairs — the value
            format ``gr.Chatbot`` renders. (The original appended
            ``("Bot", text)`` tuples, so the literal string "Bot" showed
            up as the user's message and the user's real text was lost.)
        state: per-session dict with keys ``"stage"``
            ("chat" / "clarify" / "ml") and ``"ml_type"``
            ("regression" / "classification" / None).

    Returns:
        The updated ``(chat_history, state)`` pair expected by the
        textbox submit handler.
    """
    stage = state.get("stage", "chat")

    # Stage 2: the bot asked which model to run; interpret the answer.
    if stage == "clarify":
        m = message.lower()
        if "linear" in m:
            state.update(stage="ml", ml_type="regression")
            chat_history.append((message, "Running linear regression…"))
        elif "class" in m:
            state.update(stage="ml", ml_type="classification")
            chat_history.append((message, "Running logistic classification…"))
        else:
            # Unrecognised answer: re-prompt and stay in the clarify stage.
            chat_history.append((message, "⚠️ Say ‘linear regression’ or ‘classification’."))
            return chat_history, state

    # Stage 3: report the pre-trained model's held-out metrics. Reached by
    # falling through from a successful clarification in the same call.
    if state["stage"] == "ml":
        if state["ml_type"] == "regression":
            result = f"✅ R²={r2:.3f}\nCoefs={np.round(reg.coef_, 3).tolist()}"
        else:
            result = f"✅ Accuracy={acc:.3f}"
        # None in the user slot renders a bot-only message in gr.Chatbot.
        chat_history.append((None, result))
        state.update(stage="chat", ml_type=None)
        return chat_history, state

    # Stage 1: an ML keyword switches the conversation into clarify mode.
    if any(k in message.lower() for k in ["predict", "regression", "classification"]):
        state["stage"] = "clarify"
        chat_history.append((message, "Sure—linear regression or classification?"))
        return chat_history, state

    # Default: free-form LLM chat via the distilgpt2 pipeline.
    reply = chat_response(message)
    chat_history.append((message, reply))
    return chat_history, state
|
|
| |
with gr.Blocks() as demo:
    # Minimal chat UI: title, conversation pane, borderless input box, and
    # the per-session dialogue state threaded through respond().
    gr.Markdown("## 🤖 LLM + Housing‑ML Chatbot")
    chatbot = gr.Chatbot()
    msg_box = gr.Textbox(placeholder="Type here…", show_label=False)
    session = gr.State({"stage": "chat", "ml_type": None})
    # Pressing Enter routes (text, history, state) through respond() and
    # writes the updated history and state back to the UI.
    msg_box.submit(respond, [msg_box, chatbot, session], [chatbot, session])
|
|
| |
# Queue requests so concurrent users are served in order.
demo = demo.queue()

if __name__ == "__main__":
    # Bind to all interfaces; hosting platforms inject PORT (default 7860).
    port = int(os.getenv("PORT", 7860))
    demo.launch(server_name="0.0.0.0", server_port=port)
|
|