# app.py — Laptop recommendation chatbot (TinyLlama + Gradio).
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import gradio as gr

# Load pre-trained Hugging Face model for recommendation tasks.
model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"  # Replace with your desired model

# `use_auth_token` is deprecated in recent transformers releases (superseded by
# `token`), and TinyLlama is a public checkpoint, so no token is needed at all.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Shared text-generation pipeline, reused for every user request.
text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
# Function to handle laptop recommendation tasks
def laptop_recommendation(user_input, task):
    """
    Handles laptop recommendation tasks based on user preferences.

    Parameters:
    - user_input: str, the input text from the user with laptop preferences.
    - task: str, the type of task (e.g., "Recommendation", "Compare",
      "Budget Recommendation").

    Returns:
    - str: The generated response, or a validation/error message.
    """
    if not user_input.strip():
        return "Please provide some input."

    # Construct prompts based on the selected task; unknown tasks are rejected.
    if task == "Recommendation":
        prompt = f"Recommend a laptop based on the following preferences:\n{user_input}\nRecommended Laptop:"
    elif task == "Compare":
        prompt = f"Compare two laptops based on the following specifications:\n{user_input}\nComparison:"
    elif task == "Budget Recommendation":
        prompt = f"Recommend the best laptop for the following budget:\n{user_input}\nRecommended Laptop for Budget:"
    else:
        return "Invalid task selected."

    try:
        # max_new_tokens (not max_length) so a long prompt cannot consume the
        # whole token budget and leave an empty continuation — max_length
        # counts prompt + generated tokens together.
        # do_sample=True is required for temperature/top_p to take effect;
        # without it the pipeline decodes greedily and ignores both settings.
        response = text_generator(
            prompt,
            max_new_tokens=96,  # Adjust for appropriate response length
            num_return_sequences=1,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
            temperature=0.7,
            top_p=0.9,
        )[0]["generated_text"]
        # The pipeline echoes the prompt; return only the generated continuation.
        return response[len(prompt):].strip()
    except Exception as e:
        # Surface the failure as a chat message instead of crashing the UI.
        return f"An error occurred during text generation: {str(e)}"
# Gradio Interface
def gradio_interface(user_input, task):
    """
    Thin adapter so Gradio event handlers call into the core
    recommendation logic.
    """
    result = laptop_recommendation(user_input, task)
    return result
# Gradio Interface
with gr.Blocks() as laptop_recommendation_ui:
    gr.Markdown("# Laptop Recommendation Chatbot")
    gr.Markdown(
        "This chatbot helps with recommending laptops based on preferences, comparing laptops, and suggesting options based on budget."
    )

    # Input widgets: free-text preferences plus a task selector.
    preference_box = gr.Textbox(
        lines=5,
        placeholder="Enter your laptop preferences here...",
        label="Your Input",
    )
    task_choice = gr.Radio(
        ["Recommendation", "Compare", "Budget Recommendation"],
        label="Select Task",
    )
    response_box = gr.Textbox(lines=10, label="Chatbot Response")

    # Action buttons.
    submit_btn = gr.Button("Submit")
    clear_btn = gr.Button("Clear")

    # Submit runs the model; Clear blanks both the input and the response.
    submit_btn.click(
        gradio_interface,
        inputs=[preference_box, task_choice],
        outputs=response_box,
    )
    clear_btn.click(lambda: ("", ""), None, [preference_box, response_box])

laptop_recommendation_ui.launch()