shahiil commited on
Commit
156cd68
·
verified ·
1 Parent(s): a9ba175

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +74 -0
app.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import gradio as gr

# Load pre-trained Hugging Face model for recommendation tasks.
# TinyLlama is a small (1.1B-parameter) chat model, chosen so the app can run
# on modest hardware; the load happens once at import time.
model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" # Specialized model for general tasks
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Increase the tokenizer's max length so longer user prompts are not
# truncated before generation.
tokenizer.model_max_length = 2048 # Increase this to a higher number if needed
# Shared text-generation pipeline used by laptop_recommendation() below.
text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
12
+
13
# Function to handle laptop recommendation tasks
def laptop_recommendation(user_input: str, task: str) -> str:
    """Generate a laptop-related response for the selected task.

    Parameters:
    - user_input: str, free-text preferences, specs, or budget from the user.
    - task: str, one of "Recommendation", "Compare", "Budget Recommendation".

    Returns:
    - str: the model-generated continuation (prompt stripped), or an error
      message when the task is not recognized.
    """
    # Prompt templates keyed by task; a dict lookup replaces the if/elif chain.
    prompts = {
        "Recommendation": f"Recommend a laptop based on the following preferences:\n{user_input}\nRecommended Laptop:",
        "Compare": f"Compare two laptops based on the following specifications:\n{user_input}\nComparison:",
        "Budget Recommendation": f"Recommend the best laptop for the following budget:\n{user_input}\nRecommended Laptop for Budget:",
    }
    prompt = prompts.get(task)
    if prompt is None:
        return "Invalid task selected."

    # Fix: the original max_length=96 counted the PROMPT tokens too, so any
    # nontrivial prompt left little or no room for the answer.
    # max_new_tokens bounds only the generated continuation.
    # Fix: temperature/top_p are ignored under greedy decoding; do_sample=True
    # is required for them to take effect.
    response = text_generator(
        prompt,
        max_new_tokens=96,  # adjust to fit the response size you want
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
    )[0]["generated_text"]

    # The pipeline echoes the prompt; return only the text generated after it.
    return response[len(prompt):].strip()
47
+
48
# Gradio Interface
def gradio_interface(user_input, task):
    """Bridge between the Gradio UI and the recommendation logic.

    Rejects blank input with a short message; otherwise forwards the raw
    text and selected task to laptop_recommendation unchanged.
    """
    cleaned = user_input.strip()
    if cleaned:
        return laptop_recommendation(user_input, task)
    return "Please enter some input."
56
+
57
# Build the web UI. Components are declared inside the Blocks context in the
# order they should appear on the page.
with gr.Blocks() as laptop_recommendation_ui:
    gr.Markdown("# Laptop Recommendation Chatbot")
    gr.Markdown("This chatbot helps with recommending laptops based on preferences, comparing laptops, and suggesting options based on budget.")

    # User input components
    user_input = gr.Textbox(lines=5, placeholder="Enter your laptop preferences here...", label="Your Input")
    task = gr.Radio(["Recommendation", "Compare", "Budget Recommendation"], label="Select Task")
    output = gr.Textbox(lines=10, label="Chatbot Response")

    # Buttons
    submit_button = gr.Button("Submit")
    clear_button = gr.Button("Clear")

    # Interaction: Submit routes (input text, task) through gradio_interface
    # into the response box.
    submit_button.click(gradio_interface, inputs=[user_input, task], outputs=output)
    # Clear: the lambda's 2-tuple is mapped onto [user_input, output],
    # resetting both textboxes to empty strings.
    clear_button.click(lambda: ("", ""), None, [user_input, output])

# Start the Gradio server (blocking call; module-level side effect).
laptop_recommendation_ui.launch()