Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,37 +1,55 @@
|
|
|
|
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
-
import torch
|
| 3 |
-
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 4 |
from profanity_check import predict
|
| 5 |
|
| 6 |
|
| 7 |
st.title("Chef Medi")
|
| 8 |
st.write("Share what you want to cook...")
|
| 9 |
|
| 10 |
-
prompt = st.chat_input("Say something")
|
| 11 |
|
| 12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
-
model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
|
| 15 |
-
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 16 |
-
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16)
|
| 17 |
|
| 18 |
-
def
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
|
| 28 |
if prompt:
|
| 29 |
st.write(f"You: {prompt}")
|
| 30 |
offensive = predict([prompt])
|
| 31 |
if offensive == [1]:
|
| 32 |
-
ai_resp = "Please refrain from
|
| 33 |
else:
|
| 34 |
-
ai_resp =
|
| 35 |
-
|
| 36 |
-
chat_history.append({'role': 'assistant', 'content': ai_resp})
|
| 37 |
-
st.write(f"Medi: {ai_resp}")
|
|
|
|
| 1 |
+
import re

import streamlit as st

from langchain_core.prompts import ChatPromptTemplate
from langchain_huggingface import ChatHuggingFace, HuggingFacePipeline
from profanity_check import predict
|
| 5 |
|
| 6 |
|
| 7 |
# Static page chrome, rendered on every Streamlit rerun.
st.title("Chef Medi")
st.write("Share what you want to cook...")
|
| 9 |
|
|
|
|
| 10 |
|
| 11 |
+
# Prompt template: fixed system persona plus a slot for the user's message.
_SYSTEM_PERSONA = 'Your name is Medi and you are an AI five star michelin star chef who teaches cooking. Your job is to guide users and help them create delicious food. Write minimal text to teach users answer in bullet points'

chat_template = ChatPromptTemplate.from_messages(
    [('system', _SYSTEM_PERSONA), ('human', '{user_input}')]
)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Model


@st.cache_resource
def _load_chat_model():
    """Build the TinyLlama chat model once and reuse it across reruns.

    Streamlit re-executes the whole script on every user interaction;
    without ``st.cache_resource`` the 1.1B-parameter model would be
    reloaded on each chat message.
    """
    llm = HuggingFacePipeline.from_model_id(
        model_id='TinyLlama/TinyLlama-1.1B-Chat-v1.0',
        task='text-generation',
        pipeline_kwargs={'max_new_tokens': 512},
    )
    return ChatHuggingFace(llm=llm)


model = _load_chat_model()
|
| 31 |
|
|
|
|
|
|
|
|
|
|
| 32 |
|
| 33 |
+
def model_answer(user_input, chat_template):
    """Run the prompt through the chat model and return the reply text.

    Parameters
    ----------
    user_input : str
        The user's (already profanity-screened) message.
    chat_template : ChatPromptTemplate
        Template with a ``{user_input}`` slot and the system persona.

    Returns
    -------
    str
        The assistant's reply, stripped of surrounding whitespace.
    """
    chain = chat_template | model

    result = chain.invoke({'user_input': user_input})

    # TinyLlama's chat format echoes the whole conversation; keep only the
    # text after the final assistant marker.
    match = re.search(r"<\|assistant\|>(.*)", result.content, re.DOTALL)
    if match is None:
        # Marker missing (e.g. different template output): fall back to the
        # raw content instead of crashing on ``match.group``.
        return result.content.strip()
    return match.group(1).strip()
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
prompt = st.chat_input("Say something")

if prompt:
    st.write(f"You: {prompt}")
    # predict returns an array-like of 0/1 labels, one per input string.
    offensive = predict([prompt])
    if offensive[0] == 1:
        # Fixed grammar in the user-facing refusal message.
        ai_resp = "Please refrain from using bad language. Thanks"
    else:
        ai_resp = model_answer(prompt, chat_template)
    st.write(f"Medi: {ai_resp}")
|
|
|
|
|
|