abaansohail131 committed
Commit 01515a2 · verified · 1 Parent(s): 85631bb

Update app.py

Files changed (1)
  1. app.py +72 -50
app.py CHANGED
@@ -1,64 +1,86 @@
- import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
      ],
  )
 
-
- if __name__ == "__main__":
-     demo.launch()
 
  import gradio as gr
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer
+ import ast
+ import gdown
+ from pathlib import Path
+
+ MODEL_FILE_ID = "1-4c80EuzAzXvmXsuA6pgVk7Drkv8Dh67"
+ MODEL_FILE_NAME = "model.safetensors"  # Adjust based on your file name
+ MODEL_DIR = "fine_tuned_gpt2_medium"  # Adjust based on your directory name
+
+
+ # Download the model weights from Google Drive if they are not already present
+ def download_model():
+     model_path = Path(MODEL_DIR) / MODEL_FILE_NAME
+     if not model_path.exists():
+         model_path.parent.mkdir(parents=True, exist_ok=True)
+         print("Downloading model from Google Drive...")
+         gdown.download(
+             f"https://drive.google.com/uc?id={MODEL_FILE_ID}",
+             str(model_path),
+             quiet=False,
+         )
+         print("Model downloaded successfully.")
 
+ # Load the model and tokenizer. Note that from_pretrained() with
+ # local_files_only=True also expects config.json (and the tokenizer files)
+ # to be present in MODEL_DIR alongside the downloaded weights.
+ def load_model():
+     download_model()
+     tokenizer = GPT2Tokenizer.from_pretrained(MODEL_DIR)  # Use the base "gpt2-medium" tokenizer if necessary
+     model = GPT2LMHeadModel.from_pretrained(
+         pretrained_model_name_or_path=MODEL_DIR,
+         local_files_only=True,  # Ensure it loads from local storage
+     )
+     return model, tokenizer
+
+
+ model, tokenizer = load_model()
 
+ # Functions for question generation and feedback
+ def generate_question(response, history):
+     input_text = ""
+     for qa in history:
+         input_text += f"Question: {qa['question']}\nAnswer: {qa['answer']}\n"
+     input_text += f"Response: {response}\nNext Question:"
+
+     inputs = tokenizer.encode(input_text, return_tensors="pt", truncation=True, max_length=512)
+     # max_new_tokens (rather than max_length) so generation is not capped below the prompt length
+     outputs = model.generate(inputs, max_new_tokens=100, num_return_sequences=1, pad_token_id=tokenizer.eos_token_id)
+     generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     next_question = generated_text.split("Next Question:")[-1].strip()
+     return next_question
+
+
+ def get_feedback(session_data):
+     input_text = "Session Feedback:\n"
+     for qa in session_data:
+         input_text += f"Question: {qa['question']}\nAnswer: {qa['answer']}\n"
+
+     inputs = tokenizer.encode(input_text, return_tensors="pt", truncation=True, max_length=512)
+     outputs = model.generate(inputs, max_new_tokens=150, num_return_sequences=1, pad_token_id=tokenizer.eos_token_id)
+     generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     feedback = generated_text.split("Session Feedback:")[-1].strip()
+     return feedback
 
+ # Gradio interface wrappers
+ def question_interface(response, history):
+     history = ast.literal_eval(history)  # Parse the string input into a list (safer than eval)
+     return generate_question(response, history)
+
+
+ def feedback_interface(session_data):
+     session_data = ast.literal_eval(session_data)  # Parse the string input into a list (safer than eval)
+     return get_feedback(session_data)
+
+
+ # gr.Interface takes a single callable, so the two functions are exposed as tabs
+ question_tab = gr.Interface(
+     fn=question_interface,
+     inputs=[
+         gr.Textbox(label="User Response"),
+         gr.Textbox(label="History (list of Q&A in string format)"),
      ],
+     outputs="text",
  )
+ feedback_tab = gr.Interface(
+     fn=feedback_interface,
+     inputs=gr.Textbox(label="Session data (list of Q&A in string format)"),
+     outputs="text",
+ )
+ iface = gr.TabbedInterface(
+     [question_tab, feedback_tab],
+     tab_names=["Generate Question", "Get Feedback"],
+ )
 
+ iface.launch()
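
For reference, a minimal sketch of how the two tabs expect their inputs once the updated app is running. The `question`/`answer` keys follow the code above; the concrete values here are hypothetical:

# Hypothetical example inputs: each history/session field is a Python-literal
# string that ast.literal_eval() in the wrappers above parses back into a list.
response = "I have two years of experience with Python."
history = str([
    {"question": "Tell me about yourself.", "answer": "I am a recent CS graduate."},
])
print(question_interface(response, history))  # -> next generated question
print(feedback_interface(history))            # -> feedback over the session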