hari7261 committed on
Commit
77d7aad
·
verified ·
1 Parent(s): b2f4664

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -3,7 +3,8 @@ import gradio as gr
3
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
4
 
5
  # ---------- CONFIG ----------
6
- MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.2" # Change to smaller model if needed
 
7
 
8
  # Preload model and tokenizer
9
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
@@ -37,7 +38,6 @@ def chat_with_model(message, history):
37
  if not is_tech_query(message):
38
  return history + [[message, "⚠️ I can only answer technology-related queries."]]
39
 
40
- # Build conversation context
41
  conversation = ""
42
  for user_msg, bot_msg in history:
43
  conversation += f"User: {user_msg}\nAssistant: {bot_msg}\n"
@@ -55,7 +55,6 @@ def chat_with_model(message, history):
55
  session_state = {"authenticated": False}
56
 
57
  def login(username, password):
58
- # Simple direct login check
59
  if (username == "admin" and password == "admin123") or (username == "techuser" and password == "techpass"):
60
  session_state["authenticated"] = True
61
  return gr.update(visible=False), gr.update(visible=True), ""
 
3
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
4
 
5
  # ---------- CONFIG ----------
6
+ # Using an open-access model instead of gated Mistral
7
+ MODEL_NAME = "tiiuae/falcon-7b-instruct"
8
 
9
  # Preload model and tokenizer
10
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
 
38
  if not is_tech_query(message):
39
  return history + [[message, "⚠️ I can only answer technology-related queries."]]
40
 
 
41
  conversation = ""
42
  for user_msg, bot_msg in history:
43
  conversation += f"User: {user_msg}\nAssistant: {bot_msg}\n"
 
55
  session_state = {"authenticated": False}
56
 
57
  def login(username, password):
 
58
  if (username == "admin" and password == "admin123") or (username == "techuser" and password == "techpass"):
59
  session_state["authenticated"] = True
60
  return gr.update(visible=False), gr.update(visible=True), ""