import gradio as gr
# Optional backends, kept for reference alongside the disabled code paths below
#from transformers import pipeline
#from ctransformers import AutoModelForCausalLM
from llama_cpp import Llama
def create_prompt_formats(ingredients, recipe_duration, nutritional_req):
    # Static prompt-template markers
    INTRO_BLURB = "## Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request."
    INSTRUCTION_KEY = "### Instruction:"
    INPUT_KEY = "### Input:"
    RESPONSE_KEY = "### Response:"
    END_KEY = "### End"

    # Map the duration range (minutes) selected in the UI to a qualitative label
    recipe_duration_map = {"< 10": "very short", "10 - 30": "short", "30 - 90": "medium", "90 - 240": "long", "> 240": "very long"}
    recipe_duration_str = recipe_duration_map[recipe_duration.strip()]

    # Content strings to be formatted
    INSTRUCTION_CONT = "You are part of a recommendation system where your communication style should be friendly, engaging, and informative.\nYour task is to recommend ingredients and a recipe, along with instructions.\nYou will receive one or more mandatory ingredients as input, along with the recipe duration and nutritional requirements such as protein level, fat level, sugar level, etc.\nYour task is to suggest a list of ingredients that can be combined with the input ingredient(s) to create a recipe.\nYou should provide a list of ingredients, the name of the recipe, and step-by-step instructions on how to make it."
    INPUT_CONT = f"""Generate a recipe that must include the following ingredients: {ingredients}.\nThe recipe duration should be {recipe_duration_str}.\nFollowing are the nutrition requirements:\nprotein level should be {nutritional_req['protein']}, it should contain {nutritional_req['sugar']} sugar, {nutritional_req['carbs']} carbohydrates, {nutritional_req['fat']} fat, {nutritional_req['sat_fat']} saturated fats, and a {nutritional_req['calories']} level of calories"""

    # Combine the content with the static template markers
    blurb = INTRO_BLURB
    instruction = f"{INSTRUCTION_KEY}\n{INSTRUCTION_CONT}"
    input_context = f"{INPUT_KEY}\n{INPUT_CONT}"
    response = f"{RESPONSE_KEY}\n"

    # Join the non-empty template elements into a single prompt string
    parts = [part for part in [blurb, instruction, input_context, response] if part]
    formatted_prompt = "\n\n".join(parts)
    return formatted_prompt
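
# For reference, the formatted prompt roughly follows this instruction/input/response layout:
#
#   ## Below is an instruction that describes a task, ...
#
#   ### Instruction:
#   <task description for the recommendation system>
#
#   ### Input:
#   <mandatory ingredients, duration, and nutritional requirements>
#
#   ### Response:
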
def predict(ingredients, recipe_duration_mins, calories,
            carbs, protein, sugar, fat, sat_fat):
    nutri_req = {"protein": protein, "sugar": sugar, "carbs": carbs, "fat": fat, "sat_fat": sat_fat, "calories": calories}
    prompt = create_prompt_formats(ingredients, recipe_duration_mins, nutri_req)

    # Hugging Face transformers pipeline (disabled)
    #out = pipe(prompt)[0]["generated_text"]
    # ctransformers (disabled)
    #out = llm(prompt)

    # llama-cpp-python: stream the completion and yield the accumulated text so the
    # Gradio output box updates progressively
    res = ""
    out = llm.create_completion(prompt=prompt, max_tokens=2048, stream=True)
    # Post-processing used by the non-streaming backends (disabled)
    #cleaned_out = out.split("### Response:")[1]
    #cleaned_out = cleaned_out.split("### End")[0]
    #return cleaned_out
    for text in out:
        res += text['choices'][0]['text']
        yield res
# Alternative 1: Hugging Face transformers pipeline (disabled)
#pipe = pipeline("text-generation", model="multitude0099/llama-2-chat-7b-recipegen", model_kwargs={"load_in_8bit": False})
# Alternative 2: CPU implementation using ctransformers with a local GGUF file (disabled)
#llm = AutoModelForCausalLM.from_pretrained("./../llama-2-chat-7b-recipegen-GGUF", model_file="llama-2-chat-7b-recipegen.Q5_K_M.gguf", model_type="llama")
# llama-cpp-python: download the GGUF model from the Hugging Face Hub and load it
llm = Llama.from_pretrained(
    repo_id="multitude0099/llama-2-chat-7b-recipegen-GGUF",
    filename="llama-2-chat-7b-recipegen.Q5_K_M.gguf",
    verbose=True,
    n_ctx=1024,  # context window; the prompt plus generated tokens must fit within it
)
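
# Hypothetical local sanity check (not part of the app): with the model loaded above,
# the prompt builder and the generator can be exercised directly, e.g.
#
#   test_prompt = create_prompt_formats("milk, sugar", "30 - 90",
#                                       {"protein": "medium", "sugar": "medium", "carbs": "medium",
#                                        "fat": "low", "sat_fat": "low", "calories": "medium"})
#   print(llm.create_completion(prompt=test_prompt, max_tokens=256)["choices"][0]["text"])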
# Example inputs: ingredients, duration (mins), calories, carbs, protein, sugar, fat, saturated fat
examples = [["milk, sugar", "30 - 90", "medium", "medium", "medium", "medium", "low", "low"]]

# Dropdowns for recipe duration and nutritional requirements
duration_dp = gr.Dropdown(choices=["< 10", "10 - 30", "30 - 90", "90 - 240", "> 240"], value="30 - 90", label="Recipe duration (minutes)")
calories_dp = gr.Dropdown(choices=["zero", "low", "medium", "high"], value="medium", label="Calories")
protein_dp = gr.Dropdown(choices=["zero", "low", "medium", "high"], value="medium", label="Protein")
carbs_dp = gr.Dropdown(choices=["zero", "low", "medium", "high"], value="medium", label="Carbohydrates")
sugar_dp = gr.Dropdown(choices=["zero", "low", "medium", "high"], value="medium", label="Sugar")
fat_dp = gr.Dropdown(choices=["zero", "low", "medium", "high"], value="low", label="Fat")
sat_fat_dp = gr.Dropdown(choices=["zero", "low", "medium", "high"], value="low", label="Saturated fat")
demo = gr.Interface(
    fn=predict,
    inputs=[gr.Textbox(label="Ingredients (comma-separated)"), duration_dp, calories_dp, carbs_dp, protein_dp, sugar_dp, fat_dp, sat_fat_dp],
    outputs="text",
    examples=examples,
    allow_flagging="never",
)

demo.launch()