Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,4 +1,4 @@
|
|
| 1 |
-
from transformers import
|
| 2 |
import gradio as gr
|
| 3 |
import torch
|
| 4 |
import json
|
|
@@ -7,8 +7,8 @@ title = "AI ChatBot"
|
|
| 7 |
description = "A State-of-the-Art Large-scale Pretrained Response generation model (GEMMA)"
|
| 8 |
examples = [["How are you?"]]
|
| 9 |
|
| 10 |
-
tokenizer =
|
| 11 |
-
model =
|
| 12 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 13 |
model.to(device)
|
| 14 |
|
|
@@ -16,35 +16,7 @@ model.to(device)
|
|
| 16 |
with open("uts_courses.json", "r") as f:
|
| 17 |
courses_data = json.load(f)
|
| 18 |
|
| 19 |
-
|
| 20 |
-
# Check if the input question is about courses
|
| 21 |
-
if "courses" in input_text.lower():
|
| 22 |
-
# Check if the input question contains a specific field (e.g., Engineering, Information Technology, etc.)
|
| 23 |
-
for field in courses_data["courses"]:
|
| 24 |
-
if field.lower() in input_text.lower():
|
| 25 |
-
# Get the list of courses for the specified field
|
| 26 |
-
courses_list = courses_data["courses"][field]
|
| 27 |
-
# Format the response
|
| 28 |
-
response = f"The available courses in {field} are: {', '.join(courses_list)}."
|
| 29 |
-
return response, history
|
| 30 |
-
|
| 31 |
-
# If the input question is not about courses, use the dialogue model to generate a response
|
| 32 |
-
# tokenize the new input sentence
|
| 33 |
-
new_user_input_ids = tokenizer.encode(
|
| 34 |
-
input_text + tokenizer.eos_token, return_tensors="pt"
|
| 35 |
-
).to(device)
|
| 36 |
-
|
| 37 |
-
# append the new user input tokens to the chat history
|
| 38 |
-
bot_input_ids = torch.cat([torch.tensor(history).to(device), new_user_input_ids], dim=-1)
|
| 39 |
-
|
| 40 |
-
# generate a response
|
| 41 |
-
history = model.generate(
|
| 42 |
-
bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id
|
| 43 |
-
).tolist()
|
| 44 |
-
|
| 45 |
-
# convert the tokens to text, and then split the responses into lines
|
| 46 |
-
response = tokenizer.decode(history[0]).split()
|
| 47 |
-
return " ".join(response), history
|
| 48 |
|
| 49 |
def main():
|
| 50 |
# Load courses data from JSON file
|
|
|
|
| 1 |
+
from transformers import GPT2LMHeadModel, GPT2Tokenizer
|
| 2 |
import gradio as gr
|
| 3 |
import torch
|
| 4 |
import json
|
|
|
|
| 7 |
description = "A State-of-the-Art Large-scale Pretrained Response generation model (GPT-2)"
|
| 8 |
examples = [["How are you?"]]
|
| 9 |
|
| 10 |
+
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
|
| 11 |
+
model = GPT2LMHeadModel.from_pretrained("gpt2")
|
| 12 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 13 |
model.to(device)
|
| 14 |
|
|
|
|
| 16 |
with open("uts_courses.json", "r") as f:
|
| 17 |
courses_data = json.load(f)
|
| 18 |
|
| 19 |
+
# Define the predict function as before
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
|
| 21 |
def main():
|
| 22 |
# Load courses data from JSON file
|