hari7261 committed on
Commit
56df3cf
Β·
verified Β·
1 Parent(s): 011dc8a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +95 -40
app.py CHANGED
@@ -6,22 +6,43 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
6
  # Using an open-access model instead of gated Mistral
7
  MODEL_NAME = "tiiuae/falcon-7b-instruct"
8
 
9
- # Preload model and tokenizer
10
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
11
- model = AutoModelForCausalLM.from_pretrained(
12
- MODEL_NAME,
13
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
14
- device_map="auto"
15
- )
16
-
17
- generator = pipeline(
18
- "text-generation",
19
- model=model,
20
- tokenizer=tokenizer,
21
- max_new_tokens=512,
22
- temperature=0.5,
23
- do_sample=True
24
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
  # ---------- TECH FILTER ----------
27
  def is_tech_query(message: str) -> bool:
@@ -29,59 +50,93 @@ def is_tech_query(message: str) -> bool:
29
  "python", "java", "javascript", "html", "css", "react", "angular",
30
  "node", "machine learning", "deep learning", "ai", "api", "code",
31
  "debug", "error", "technology", "computer", "programming", "software",
32
- "hardware", "cybersecurity", "database", "sql", "devops", "cloud"
 
 
 
 
33
  ]
34
  return any(k in message.lower() for k in tech_keywords)
35
 
36
  # ---------- CHAT FUNCTION ----------
37
  def chat_with_model(message, history):
38
  if not is_tech_query(message):
39
- return history + [[message, "⚠️ I can only answer technology-related queries."]]
40
 
41
  conversation = ""
42
  for user_msg, bot_msg in history:
43
  conversation += f"User: {user_msg}\nAssistant: {bot_msg}\n"
44
  conversation += f"User: {message}\nAssistant:"
45
 
46
- output = generator(conversation)[0]["generated_text"]
47
- if "Assistant:" in output:
48
- answer = output.split("Assistant:")[-1].strip()
49
- else:
50
- answer = output.strip()
51
-
52
- return history + [[message, answer]]
 
 
 
 
 
 
 
53
 
54
  # ---------- LOGIN + UI ----------
55
  session_state = {"authenticated": False}
56
 
57
  def login(username, password):
58
- if (username == "admin" and password == "admin123") or (username == "techuser" and password == "techpass"):
 
 
 
 
 
 
59
  session_state["authenticated"] = True
60
  return gr.update(visible=False), gr.update(visible=True), ""
61
  else:
62
- return gr.update(), gr.update(visible=False), "❌ Invalid credentials."
 
 
 
 
63
 
64
- with gr.Blocks(css=".gradio-container {max-width: 750px; margin: auto;}") as demo:
 
 
 
 
65
  # Login Page
66
- with gr.Group(visible=not session_state["authenticated"]) as login_group:
67
- gr.Markdown("# πŸ” Login to Tech Chatbot")
68
- username = gr.Textbox(label="Username")
69
- password = gr.Textbox(label="Password", type="password")
70
- login_btn = gr.Button("Login")
 
 
71
  login_status = gr.Markdown("")
72
 
73
  # Chatbot Page
74
- with gr.Group(visible=session_state["authenticated"]) as chat_group:
75
- gr.Markdown("# πŸ’» Tech Helper Chatbot")
76
  chatbot = gr.Chatbot(height=500)
77
- msg = gr.Textbox(placeholder="Type your tech question here...", label="Your Message")
78
- clear = gr.Button("Clear Chat")
 
 
 
 
 
79
 
80
- msg.submit(chat_with_model, [msg, chatbot], chatbot)
81
- clear.click(lambda: None, None, chatbot)
 
 
82
 
83
  # Button Logic
84
  login_btn.click(login, [username, password], [login_group, chat_group, login_status])
85
 
86
  if __name__ == "__main__":
87
- demo.launch()
 
6
  # Using an open-access model instead of gated Mistral
7
  MODEL_NAME = "tiiuae/falcon-7b-instruct"
8
 
9
try:
    # Preload the tokenizer and model once at import time so the first
    # chat request does not pay the (multi-GB) load cost.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

    # Load model with appropriate settings: half precision + auto device
    # placement only when a GPU is available, full fp32 on CPU otherwise.
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device_map="auto" if torch.cuda.is_available() else None,
        low_cpu_mem_usage=True,
    )

    generator = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=512,
        temperature=0.5,
        do_sample=True,
    )

except Exception as e:
    print(f"Error loading model: {str(e)}")
    # Fallback to CPU if the GPU path fails.
    # Bug fix: the tokenizer must be (re)loaded here as well — if the
    # AutoTokenizer call above was what raised, `tokenizer` would be
    # undefined and the pipeline() call below would crash with NameError.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        torch_dtype=torch.float32,
        device_map=None,
    )
    generator = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=512,
        temperature=0.5,
        do_sample=True,
    )
46
 
47
  # ---------- TECH FILTER ----------
48
  def is_tech_query(message: str) -> bool:
 
50
  "python", "java", "javascript", "html", "css", "react", "angular",
51
  "node", "machine learning", "deep learning", "ai", "api", "code",
52
  "debug", "error", "technology", "computer", "programming", "software",
53
+ "hardware", "cybersecurity", "database", "sql", "devops", "cloud",
54
+ "algorithm", "backend", "frontend", "server", "linux", "windows",
55
+ "docker", "kubernetes", "git", "github", "vscode", "pycharm",
56
+ "tensorflow", "pytorch", "neural network", "blockchain", "web3",
57
+ "smart contract", "ethereum", "bitcoin", "cryptography", "encryption"
58
  ]
59
  return any(k in message.lower() for k in tech_keywords)
60
 
61
# ---------- CHAT FUNCTION ----------
def chat_with_model(message, history):
    """Answer a tech question, appending the exchange to *history*.

    Non-tech messages are refused via is_tech_query(). The prompt is the
    full prior conversation rendered as "User:/Assistant:" turns; the
    model's continuation after the final "Assistant:" is the reply.
    Returns the new history as a list of [user, bot] pairs.
    """
    if not is_tech_query(message):
        return history + [[message, "⚠️ I can only answer technology-related queries. Please ask about programming, AI, cybersecurity, or other tech topics."]]

    turns = [f"User: {u}\nAssistant: {b}\n" for u, b in history]
    prompt = "".join(turns) + f"User: {message}\nAssistant:"

    try:
        raw = generator(prompt, pad_token_id=tokenizer.eos_token_id)[0]["generated_text"]
        # generated_text echoes the prompt, so take the text after the
        # last "Assistant:" marker when one is present.
        answer = raw.split("Assistant:")[-1].strip() if "Assistant:" in raw else raw.strip()
        # Clean up response: drop any hallucinated follow-on user turns.
        answer = answer.split("User:")[0].strip()
        return history + [[message, answer]]
    except Exception as exc:
        error_msg = f"❌ Error generating response: {str(exc)}"
        return history + [[message, error_msg]]
85
 
86
  # ---------- LOGIN + UI ----------
87
  session_state = {"authenticated": False}
88
 
89
  def login(username, password):
90
+ valid_credentials = {
91
+ "admin": "admin123",
92
+ "techuser": "techpass",
93
+ "guest": "guest123"
94
+ }
95
+
96
+ if username in valid_credentials and password == valid_credentials[username]:
97
  session_state["authenticated"] = True
98
  return gr.update(visible=False), gr.update(visible=True), ""
99
  else:
100
+ return gr.update(), gr.update(visible=False), "❌ Invalid credentials. Try admin/admin123 or techuser/techpass"
101
+
102
def logout():
    """Clear the auth flag and flip visibility back to the login page.

    Returns three Gradio updates targeting (login_group, chat_group,
    login_status).
    """
    session_state["authenticated"] = False
    return (
        gr.update(visible=True),
        gr.update(visible=False),
        "Logged out successfully",
    )
105
 
106
+ with gr.Blocks(css="""
107
+ .gradio-container {max-width: 750px; margin: auto;}
108
+ .chatbot {min-height: 500px;}
109
+ """) as demo:
110
+
111
  # Login Page
112
+ with gr.Group(visible=True) as login_group:
113
+ gr.Markdown("# πŸ” Tech Chatbot Login")
114
+ with gr.Row():
115
+ username = gr.Textbox(label="Username", placeholder="Enter your username")
116
+ password = gr.Textbox(label="Password", type="password", placeholder="Enter your password")
117
+ with gr.Row():
118
+ login_btn = gr.Button("Login", variant="primary")
119
  login_status = gr.Markdown("")
120
 
121
  # Chatbot Page
122
+ with gr.Group(visible=False) as chat_group:
123
+ gr.Markdown("# πŸ’» Tech Assistant")
124
  chatbot = gr.Chatbot(height=500)
125
+ with gr.Row():
126
+ msg = gr.Textbox(placeholder="Ask about programming, AI, cybersecurity...",
127
+ label="Your Tech Question", scale=4)
128
+ submit_btn = gr.Button("Send", variant="primary", scale=1)
129
+ with gr.Row():
130
+ clear = gr.Button("Clear Chat")
131
+ logout_btn = gr.Button("Logout")
132
 
133
+ msg.submit(chat_with_model, [msg, chatbot], [chatbot])
134
+ submit_btn.click(chat_with_model, [msg, chatbot], [chatbot])
135
+ clear.click(lambda: None, None, chatbot, queue=False)
136
+ logout_btn.click(logout, None, [login_group, chat_group, login_status])
137
 
138
  # Button Logic
139
  login_btn.click(login, [username, password], [login_group, chat_group, login_status])
140
 
141
if __name__ == "__main__":
    # Bind on all interfaces, port 7860 (the Hugging Face Spaces default).
    # NOTE(review): 0.0.0.0 exposes the app to the whole local network —
    # confirm that is intended when running outside a Spaces container.
    demo.launch(server_name="0.0.0.0", server_port=7860)