# Hugging Face Spaces page banner ("Spaces: Sleeping") — scrape artifact, not code.
import re
import secrets

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# ---------- CONFIG ----------
# Using an open-access model instead of gated Mistral
MODEL_NAME = "tiiuae/falcon-7b-instruct"

# Preload the tokenizer and model once at startup so every request reuses them.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

# fp16 when a GPU is available, fp32 on CPU; device_map="auto" places layers.
_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=_dtype,
    device_map="auto",
)

# Shared text-generation pipeline used by the chat handler below.
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=512,
    temperature=0.5,
    do_sample=True,
)
# ---------- TECH FILTER ----------
# Keywords that mark a message as technology-related.
_TECH_KEYWORDS = [
    "python", "java", "javascript", "html", "css", "react", "angular",
    "node", "machine learning", "deep learning", "ai", "api", "code",
    "debug", "error", "technology", "computer", "programming", "software",
    "hardware", "cybersecurity", "database", "sql", "devops", "cloud",
]

# Compiled once at import time. \b anchors match whole words only; the old
# bare-substring test (`k in message.lower()`) fired on "said"/"email"/
# "details" via the "ai" keyword, which rejected nothing in practice.
_TECH_PATTERN = re.compile(
    r"\b(?:" + "|".join(re.escape(k) for k in _TECH_KEYWORDS) + r")\b"
)


def is_tech_query(message: str) -> bool:
    """Return True if *message* mentions any technology keyword.

    Matching is case-insensitive and on whole words/phrases, so substrings
    inside unrelated words (e.g. "ai" inside "email") no longer trigger it.
    """
    return _TECH_PATTERN.search(message.lower()) is not None
# ---------- CHAT FUNCTION ----------
def chat_with_model(message, history):
    """Run one chat turn against the preloaded generator.

    Parameters
    ----------
    message : str
        The user's new message.
    history : list[list[str]]
        Prior turns as ``[user_msg, bot_msg]`` pairs (gr.Chatbot format).

    Returns
    -------
    list[list[str]]
        A NEW history list with this turn appended; *history* is not mutated.
    """
    # Refuse anything outside the tech domain.
    # (Mojibake "β οΈ" in the original repaired to the intended warning sign.)
    if not is_tech_query(message):
        return history + [[message, "⚠️ I can only answer technology-related queries."]]

    # Rebuild the running transcript as a plain-text prompt ending in an
    # open "Assistant:" cue for the model to complete.
    conversation = ""
    for user_msg, bot_msg in history:
        conversation += f"User: {user_msg}\nAssistant: {bot_msg}\n"
    conversation += f"User: {message}\nAssistant:"

    # The pipeline echoes the prompt, so keep only the text after the last
    # "Assistant:" marker (the one we just appended).
    output = generator(conversation)[0]["generated_text"]
    if "Assistant:" in output:
        answer = output.split("Assistant:")[-1]
    else:
        answer = output
    # Bug fix: with sampling on, the model often continues with a fabricated
    # "User: ..." turn; truncate at the first such marker so only the
    # assistant's own reply is shown.
    answer = answer.split("User:")[0].strip()

    return history + [[message, answer]]
# ---------- LOGIN + UI ----------
# Tracks whether this process has passed the login screen. NOTE(review):
# module-level state is shared by all visitors of the Space — per-session
# auth would need gr.State; confirm whether that matters for this demo.
session_state = {"authenticated": False}

# NOTE(review): credentials are hard-coded in plaintext — acceptable only for
# a demo; move to environment variables / a secrets store before real use.
_CREDENTIALS = [("admin", "admin123"), ("techuser", "techpass")]


def _check_credentials(username, password):
    """Timing-safe comparison of (username, password) against each known pair."""
    return any(
        secrets.compare_digest(username.encode(), u.encode())
        and secrets.compare_digest(password.encode(), p.encode())
        for u, p in _CREDENTIALS
    )


def login(username, password):
    """Validate credentials and toggle panel visibility.

    Returns a 3-tuple of Gradio updates: (login_group, chat_group, status
    message). On success the login panel hides and the chat panel shows.
    (Mojibake "β" in the original error string repaired to "❌".)
    """
    if _check_credentials(username, password):
        session_state["authenticated"] = True
        return gr.update(visible=False), gr.update(visible=True), ""
    return gr.update(), gr.update(visible=False), "❌ Invalid credentials."
with gr.Blocks(css=".gradio-container {max-width: 750px; margin: auto;}") as demo:
    # --- Login panel: visible until the user authenticates ---
    with gr.Group(visible=not session_state["authenticated"]) as login_group:
        gr.Markdown("# π Login to Tech Chatbot")
        username = gr.Textbox(label="Username")
        password = gr.Textbox(label="Password", type="password")
        login_btn = gr.Button("Login")
        login_status = gr.Markdown("")

    # --- Chat panel: hidden until login succeeds ---
    with gr.Group(visible=session_state["authenticated"]) as chat_group:
        gr.Markdown("# π» Tech Helper Chatbot")
        chatbot = gr.Chatbot(height=500)
        msg = gr.Textbox(placeholder="Type your tech question here...", label="Your Message")
        clear = gr.Button("Clear Chat")

        # Submitting the textbox runs one chat turn; Clear empties the widget.
        msg.submit(chat_with_model, [msg, chatbot], chatbot)
        clear.click(lambda: None, None, chatbot)

    # Swap panel visibility according to the entered credentials.
    login_btn.click(login, [username, password], [login_group, chat_group, login_status])

if __name__ == "__main__":
    demo.launch()