Hugging Face Space (status: Sleeping) — commit "Update app.py".
Diff of app.py (CHANGED) shown below.
|
@@ -5,25 +5,23 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
|
| 5 |
# Load the Phi-3 mini instruct model and build a text-generation pipeline.
# NOTE(review): trust_remote_code=True executes code from the model repo —
# acceptable only because the repo (microsoft/Phi-3-mini-4k-instruct) is trusted.
MODEL_ID = "microsoft/Phi-3-mini-4k-instruct"

model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    trust_remote_code=True,  # Allow custom code execution
)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
| 11 |
|
| 12 |
def chatbot_response(user_input):
    """Generate a single-turn chatbot reply for *user_input*.

    A fresh conversation context is built on every call, so no chat
    history is carried between requests.
    """
    conversation = [{"role": "user", "content": user_input}]  # Create a fresh conversation context
    result = pipe(
        conversation,
        max_new_tokens=500,
        return_full_text=False,
        temperature=0.0,  # deterministic decoding (sampling disabled below)
        do_sample=False,
    )
    return result[0]['generated_text']
|
| 17 |
|
| 18 |
-
# Define the interface
# Fix(review): this span was truncated in the diff rendering — `inputs=gr.` and
# `outputs="` were cut mid-expression, and `theme="huggingface"` was missing the
# comma before `css=`. Reconstructed using the complete component arguments from
# the updated version of this file.
iface = gr.Interface(
    fn=chatbot_response,
    inputs=gr.components.Textbox(lines=2, placeholder="Type your question here..."),
    outputs=gr.components.Text(label="Response"),
    title="Smart Chatbot",
    description="This is a smart chatbot that can answer your questions. Just type your question below and get an instant response.",
    theme="huggingface",  # NOTE(review): string themes were removed in Gradio 4.x — confirm installed version
    css=".output_text {color: blue; font-size: 16px; font-weight: bold;}"  # Additional custom CSS for styling
)
|
| 28 |
|
| 29 |
if __name__ == "__main__":
|
|
|
|
| 5 |
# Load the Phi-3 mini instruct model with eager attention, then expose it
# through a text-generation pipeline.
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-4k-instruct",
    trust_remote_code=True,  # Allow custom code execution
    # presumably chosen to avoid the flash-attention dependency — TODO confirm
    attn_implementation='eager',
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
| 12 |
|
| 13 |
def chatbot_response(user_input):
    """Return the model's reply to a single user message.

    Each call starts from an empty context, so no history is retained
    across invocations.
    """
    # Create a fresh conversation context
    prompt = [{"role": "user", "content": user_input}]
    generations = pipe(prompt, max_new_tokens=500, return_full_text=False, temperature=0.0, do_sample=False)
    return generations[0]['generated_text']
|
| 17 |
|
|
|
|
| 18 |
# Build the Gradio UI around the chatbot function: one textbox in,
# one labelled text field out.
iface = gr.Interface(
    fn=chatbot_response,
    inputs=gr.components.Textbox(lines=2, placeholder="Type your question here..."),
    outputs=gr.components.Text(label="Response"),
    title="Smart Chatbot",
    description="This is a smart chatbot that can answer your questions. Just type your question below and get an instant response.",
    # NOTE(review): string themes like "huggingface" were dropped in Gradio 4.x —
    # confirm the pinned gradio version accepts this.
    theme="huggingface",
)
|
| 26 |
|
| 27 |
if __name__ == "__main__":
|