Update app.py
app.py CHANGED
```diff
@@ -1,47 +1,34 @@
 import gradio as gr
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
 import os
+import requests
 
-# Set Hugging Face API
-from transformers import DistilBertModel, DistilBertTokenizer
+# Set your Hugging Face API key
+HF_API_KEY = "HF_API_KEY"
+API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct"
 
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+headers = {"Authorization": f"Bearer {HF_API_KEY}"}
 
-class SelfAttentionAI(nn.Module):
-    def __init__(self):
-        super(SelfAttentionAI, self).__init__()
-        self.bert = DistilBertModel.from_pretrained("distilbert-base-uncased")
-        self.fc = nn.Linear(768, 1)
-
-    def forward(self, input_ids, attention_mask):
-        outputs = self.bert(input_ids, attention_mask=attention_mask)
-        return self.fc(outputs.last_hidden_state[:, 0, :])
-
-# Load tokenizer and model
-tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
-model = SelfAttentionAI().to(device)  # Move model to appropriate device
+def query_huggingface(payload):
+    response = requests.post(API_URL, headers=headers, json=payload)
+    return response.json()
 
 def ai_tutor(question):
-    inputs = tokenizer(question, return_tensors="pt").to(device)
-    response = model(inputs["input_ids"], inputs["attention_mask"])
-
-    # Convert tensor response into a readable format
-    confidence_score = torch.sigmoid(response).item()
-    response_text = "This is a high-confidence answer." if confidence_score > 0.5 else "This is a low-confidence answer."
-    return response_text
+    payload = {
+        "inputs": question,
+        "parameters": {"max_new_tokens": 100, "temperature": 0.7}
+    }
+
+    result = query_huggingface(payload)
+
+    # Extract and format the response
+    if "error" in result:
+        return "Sorry, I couldn't process the request at the moment."
+
+    return result[0]["generated_text"]
 
 # Gradio Chatbot UI
 with gr.Blocks() as demo:
-    gr.Markdown("## AI-Powered Mechanical Engineering Learning Chatbot")
+    gr.Markdown("## 🤖 AI-Powered Mechanical Engineering Learning Chatbot")
     chatbot = gr.Chatbot()
     input_text = gr.Textbox(label="Ask a Question")
     submit_btn = gr.Button("Submit")
@@ -49,12 +36,6 @@ with gr.Blocks() as demo:
     def respond(message, history):
         response = ai_tutor(message)
         history.append((message, response))
-
-        # Limit chat history to avoid excessive memory usage
-        max_history_length = 10
-        if len(history) > max_history_length:
-            history = history[-max_history_length:]
-
         return history, ""
 
     submit_btn.click(respond, inputs=[input_text, chatbot], outputs=[chatbot, input_text])
```
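A few notes on the new version. First, the key handling: `import os` survives the rewrite but is never used, and `HF_API_KEY` is bound to the literal placeholder string "HF_API_KEY", so every request will come back 401 Unauthorized until a real key is pasted into the source. A minimal sketch of the likely intent, assuming the key is supplied as an environment variable (for example a Space secret named `HF_API_KEY`; this commit does not set that up):

```python
import os

# Assumption: the real key is injected via an environment variable
# (e.g., a Hugging Face Space secret named HF_API_KEY) rather than
# hardcoded in the source file.
HF_API_KEY = os.environ.get("HF_API_KEY", "")
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct"
headers = {"Authorization": f"Bearer {HF_API_KEY}"}
```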
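Second, `query_huggingface()` assumes the POST always succeeds and always returns JSON. A network failure raises `requests.exceptions.RequestException`, and a gateway error can return a non-JSON body; either one would crash `respond()` mid-chat. A hedged hardening sketch (the 30-second timeout is an arbitrary choice, not something in the commit):

```python
import requests

def query_huggingface(payload):
    try:
        # A timeout keeps the Gradio handler from hanging if the API stalls.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
        response.raise_for_status()
        return response.json()
    except (requests.exceptions.RequestException, ValueError) as exc:
        # Mirror the API's own error shape so the existing
        # `if "error" in result` check in ai_tutor() covers this path too.
        return {"error": str(exc)}
```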
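Finally, a quirk of the Inference API worth knowing: for text-generation models, `generated_text` contains the prompt concatenated with the completion by default, so the chatbot will echo the user's question at the start of every answer. The API accepts a `return_full_text` parameter to turn this off; a sketch of the adjusted payload:

```python
payload = {
    "inputs": question,
    "parameters": {
        "max_new_tokens": 100,
        "temperature": 0.7,
        # Return only the newly generated tokens, not prompt + completion.
        "return_full_text": False,
    },
}
```

Alternatively, the echo can be stripped client-side with `result[0]["generated_text"][len(question):]`.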