RayanAli committed on
Commit
dbbde85
·
verified ·
1 Parent(s): d708e2f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -21
app.py CHANGED
@@ -1,37 +1,55 @@
 
 
1
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from profanity_check import predict

# --- Page header --------------------------------------------------------
st.title("Chef Medi")
st.write("Share what you want to cook...")

# Single chat box for the user's cooking request.
prompt = st.chat_input("Say something")

# Running transcript of the conversation; generate_reply() appends to it.
chat_history = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
# TinyLlama chat model, loaded once at startup in half precision to keep
# the memory footprint small.
model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16)
17
 
18
def generate_reply(prompt):
    """Generate Medi's answer to *prompt*, recording both turns in chat_history.

    The full conversation so far is replayed to the model on every call so the
    reply stays in context; the reply is the text after the last "Assistant:".
    """
    chat_history.append(f"User: {prompt}")
    # Persona instruction, then the whole transcript, then a cue for the model.
    full_prompt = (
        "You are Medi, a 5-star chef. Be brief and answer in bullet points.\n"
        + "\n".join(chat_history)
        + "\nAssistant:"
    )
    encoded = tokenizer(full_prompt, return_tensors="pt")
    generated = model.generate(**encoded, max_new_tokens=1024, do_sample=False)
    decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
    # The decoded text echoes the prompt; keep only what follows the final cue.
    reply = decoded.split("Assistant:")[-1].strip()
    chat_history.append(f"Assistant: {reply}")
    return reply
 
 
 
 
 
27
 
28
if prompt:
    st.write(f"You: {prompt}")
    # profanity_check.predict returns 1 for offensive input, 0 otherwise.
    offensive = predict([prompt])
    if offensive == [1]:
        ai_resp = "Please refrain from using bad language. Thanks"
    else:
        ai_resp = generate_reply(prompt)
    # BUG FIX: the old code also appended {'role': ..., 'content': ...} dicts
    # to chat_history here, but generate_reply() already records both turns as
    # "User: ..."/"Assistant: ..." strings and later joins the list with
    # "\n".join(chat_history) — mixing in dicts made the second turn raise
    # TypeError. generate_reply() owns the history, so the appends are removed.
    st.write(f"Medi: {ai_resp}")
 
1
import re

import streamlit as st
from langchain_core.prompts import ChatPromptTemplate
from langchain_huggingface import ChatHuggingFace, HuggingFacePipeline
from profanity_check import predict
5
 
6
 
7
  st.title("Chef Medi")
8
  st.write("Share what you want to cook...")
9
 
 
10
 
11
# Chat template: a fixed chef persona plus a slot for the user's message.
persona = (
    'system',
    'Your name is Medi and you are an AI five star michelin star chef who teaches cooking. Your job is to guide users and help them create delicious food. Write minimal text to teach users answer in bullet points',
)
chat_template = ChatPromptTemplate.from_messages([
    persona,
    ('human', '{user_input}'),
])
18
+
19
+
20
# Model: TinyLlama text-generation pipeline wrapped for LangChain chat use.
llm = HuggingFacePipeline.from_model_id(
    model_id='TinyLlama/TinyLlama-1.1B-Chat-v1.0',
    task='text-generation',
    pipeline_kwargs={'max_new_tokens': 512},
)

model = ChatHuggingFace(llm=llm)
31
 
 
 
 
32
 
33
def model_answer(user_input, chat_template):
    """Run the user's message through the chat chain and return Medi's reply.

    Args:
        user_input: the raw text the user typed.
        chat_template: a ChatPromptTemplate with a '{user_input}' slot.

    Returns:
        The assistant's reply text with the chat-template scaffolding stripped.
    """
    chain = chat_template | model
    result = chain.invoke({'user_input': user_input})
    # TinyLlama's chat template prefixes the reply with <|assistant|>;
    # everything after the marker is the generated answer.
    # NOTE: requires `import re` at the top of the file — the original
    # version called re.search without ever importing re (NameError).
    match = re.search(r"<\|assistant\|>(.*)", result.content, re.DOTALL)
    if match is None:
        # BUG FIX: the original did match.group(1) unconditionally, which
        # raises AttributeError when the marker is absent. Fall back to the
        # raw model output instead of crashing.
        return result.content.strip()
    return match.group(1).strip()
44
+
45
+
46
prompt = st.chat_input("Say something")

if prompt:
    st.write(f"You: {prompt}")
    # profanity_check.predict returns 1 for offensive input, 0 otherwise.
    offensive = predict([prompt])
    if offensive == [1]:
        # BUG FIX: this user-facing message regressed to the ungrammatical
        # "refrain from use bad language"; restore "refrain from using".
        ai_resp = "Please refrain from using bad language. Thanks"
    else:
        ai_resp = model_answer(prompt, chat_template)
    st.write(f"Medi: {ai_resp}")