Tulitula committed on
Commit
81eab95
·
verified ·
1 Parent(s): 8a8c0aa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -9
app.py CHANGED
@@ -20,23 +20,29 @@ category_generator = pipeline(
20
  max_new_tokens=50,
21
  do_sample=True,
22
  temperature=1.0
23
- )
24
  analysis_generator = pipeline(
25
  "text2text-generation",
26
- model="google/flan-t5-base",
27
- tokenizer="google/flan-t5-base",
28
- max_new_tokens=1000,
29
  do_sample=True,
30
  temperature=1.0
31
- )
 
 
 
32
  suggestion_generator = pipeline(
33
  "text2text-generation",
34
- model="google/flan-t5-base",
35
- tokenizer="google/flan-t5-base",
36
- max_new_tokens=1000,
37
  do_sample=True,
38
  temperature=1.0
39
- )
 
 
 
40
 
41
  # Example URLs for gallery
42
  def get_recommendations():
 
20
  max_new_tokens=50,
21
  do_sample=True,
22
  temperature=1.0
23
+ ) # Flan-T5-small for fast category generation
24
  analysis_generator = pipeline(
25
  "text2text-generation",
26
+ model="google/flan-t5-small",
27
+ tokenizer="google/flan-t5-small",
28
+ max_new_tokens=500, # increased from 200 to 500
29
  do_sample=True,
30
  temperature=1.0
31
+ ) # Switched to Flan-T5-small for faster analysis # reduced tokens for speed
32
+ do_sample=True,
33
+ temperature=1.0
34
+ ) # Switched to Flan-T5-small for faster analysis
35
  suggestion_generator = pipeline(
36
  "text2text-generation",
37
+ model="google/flan-t5-small",
38
+ tokenizer="google/flan-t5-small",
39
+ max_new_tokens=500, # increased from 200 to 500
40
  do_sample=True,
41
  temperature=1.0
42
+ ) # Using Flan-T5-small for quicker suggestions # reduced tokens for speed
43
+ do_sample=True,
44
+ temperature=1.0
45
+ ) # Using Flan-T5-small for quicker suggestions
46
 
47
  # Example URLs for gallery
48
  def get_recommendations():