Update app.py
app.py
CHANGED
@@ -1,64 +1,89 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-
-if __name__ == "__main__":
-    demo.launch()
+from transformers import GPT2LMHeadModel, GPT2Tokenizer
+import ast
+import gdown
+from pathlib import Path
+
+MODEL_FILE_ID = "1-4c80EuzAzXvmXsuA6pgVk7Drkv8Dh67"
+MODEL_FILE_NAME = "model.safetensors"  # Adjust based on your file name
+MODEL_DIR = "fine_tuned_gpt2_medium"  # Adjust based on your directory name
+
+
+# Download the model weights from Google Drive on first launch
+def download_model():
+    model_path = Path(MODEL_DIR) / MODEL_FILE_NAME
+    if not model_path.exists():
+        print("Downloading model from Google Drive...")
+        model_path.parent.mkdir(parents=True, exist_ok=True)
+        gdown.download(
+            f"https://drive.google.com/uc?id={MODEL_FILE_ID}",
+            str(model_path),
+            quiet=False
+        )
+        print("Model downloaded successfully.")
+
+# Load the model and tokenizer
+def load_model():
+    download_model()
+    # Assumes the config and tokenizer files are already in MODEL_DIR;
+    # use the base "gpt2-medium" tokenizer if necessary.
+    tokenizer = GPT2Tokenizer.from_pretrained(MODEL_DIR)
+    model = GPT2LMHeadModel.from_pretrained(
+        pretrained_model_name_or_path=MODEL_DIR,
+        local_files_only=True  # Ensure it loads from local storage
+    )
+    return model, tokenizer
+
+model, tokenizer = load_model()
+
+# Functions for question generation and feedback
+def generate_question(response, history):
+    input_text = ""
+    for qa in history:
+        input_text += f"Question: {qa['question']}\nAnswer: {qa['answer']}\n"
+    input_text += f"Response: {response}\nNext Question:"
+
+    inputs = tokenizer.encode(input_text, return_tensors="pt", truncation=True, max_length=512)
+    outputs = model.generate(inputs, max_length=100, num_return_sequences=1, pad_token_id=tokenizer.eos_token_id)
+    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    next_question = generated_text.split("Next Question:")[-1].strip()
+    return next_question
+
+def get_feedback(session_data):
+    input_text = "Session Feedback:\n"
+    for qa in session_data:
+        input_text += f"Question: {qa['question']}\nAnswer: {qa['answer']}\n"
+
+    inputs = tokenizer.encode(input_text, return_tensors="pt", truncation=True, max_length=512)
+    outputs = model.generate(inputs, max_length=150, num_return_sequences=1, pad_token_id=tokenizer.eos_token_id)
+    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    feedback = generated_text.split("Session Feedback:")[-1].strip()
+    return feedback
+
+# Gradio Interface
+def question_interface(response, history):
+    history = ast.literal_eval(history)  # Safely convert string input to list
+    return generate_question(response, history)
+
+def feedback_interface(session_data):
+    session_data = ast.literal_eval(session_data)  # Safely convert string input to list
+    return get_feedback(session_data)
+
+# gr.Interface expects a single callable, so the two helpers are exposed
+# as separate tabs rather than a dict of functions.
+question_tab = gr.Interface(
+    fn=question_interface,
+    inputs=[
+        gr.Textbox(label="User Response"),
+        gr.Textbox(label="History (list of Q&A in string format)")
+    ],
+    outputs="text",
+)
+feedback_tab = gr.Interface(
+    fn=feedback_interface,
+    inputs=gr.Textbox(label="Session data (list of Q&A in string format)"),
+    outputs="text",
+)
+iface = gr.TabbedInterface([question_tab, feedback_tab], ["Generate Question", "Get Feedback"])
+
+iface.launch()
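
A quick local sanity check for the prompt format used above — a sketch, not part of the commit. It assumes the fine-tuned weights and tokenizer files are already in fine_tuned_gpt2_medium/ and that generate_question and get_feedback (defined in app.py) are importable; the sample history is made up.

# Build a tiny interview history and ask the model for the next question.
history = [
    {"question": "Tell me about yourself.", "answer": "I am a recent CS graduate."},
]
print(generate_question("I enjoy backend development.", history))

# Ask for end-of-session feedback on the same history.
print(get_feedback(history))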
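Once the Space is running, the two tabs could also be called programmatically with gradio_client — again a sketch: the Space id below is a placeholder, and the exact endpoint names should be confirmed with client.view_api().

from gradio_client import Client

client = Client("your-username/your-space")  # placeholder Space id
result = client.predict(
    "I enjoy backend development.",  # User Response
    "[{'question': 'Tell me about yourself.', 'answer': 'I am a recent CS graduate.'}]",  # History
    api_name="/predict",  # verify the endpoint name with client.view_api()
)
print(result)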