BhavaishKumar112 committed on
Commit
d2ae30d
·
verified ·
1 Parent(s): 0017612

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -4
app.py CHANGED
@@ -2,7 +2,6 @@ import json
2
  import gradio as gr
3
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
4
  from format.format_output import format_output
5
- from validate.validate_ingredients import validate_ingredients
6
  from device.get_device_id import get_device_id
7
 
8
  tokenizer = AutoTokenizer.from_pretrained("Ashikan/dut-recipe-generator")
@@ -14,13 +13,19 @@ def perform_model_inference(ingredients_list=None, recipe_name=None):
14
  for ingredient_index in range(len(ingredients_list)):
15
  ingredients_list[ingredient_index] = ingredients_list[ingredient_index].strip()
16
 
17
- input_text = '{"prompt": ' + json.dumps(ingredients_list)
 
18
  elif recipe_name:
19
- input_text = '{"prompt": "Generate ingredients and method for the recipe: ' + recipe_name + '"}'
 
20
  else:
21
  return "Invalid input"
22
 
23
- output = pipe(input_text, max_length=1024, temperature=0.1, do_sample=True, truncation=True)[0]["generated_text"]
 
 
 
 
24
 
25
  return format_output(output)
26
 
 
2
  import gradio as gr
3
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
4
  from format.format_output import format_output
 
5
  from device.get_device_id import get_device_id
6
 
7
  tokenizer = AutoTokenizer.from_pretrained("Ashikan/dut-recipe-generator")
 
13
  for ingredient_index in range(len(ingredients_list)):
14
  ingredients_list[ingredient_index] = ingredients_list[ingredient_index].strip()
15
 
16
+ # Create a simple prompt without JSON formatting
17
+ input_text = "Generate a recipe with these ingredients: " + ", ".join(ingredients_list)
18
  elif recipe_name:
19
+ # Simple prompt for recipe name
20
+ input_text = "Generate ingredients and method for the recipe: " + recipe_name
21
  else:
22
  return "Invalid input"
23
 
24
+ # Limit the length of the input text to avoid long processing times
25
+ input_text = input_text[:512] # Truncate if it's too long
26
+
27
+ # Use higher temperature for quicker responses
28
+ output = pipe(input_text, max_length=512, temperature=0.7, do_sample=True, truncation=True)[0]["generated_text"]
29
 
30
  return format_output(output)
31