RayanAli committed on
Commit
26a34e6
·
verified ·
1 Parent(s): 7dab3a7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -24
app.py CHANGED
@@ -1,55 +1,40 @@
1
  from langchain_huggingface import HuggingFacePipeline, ChatHuggingFace
2
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
3
- import re
4
- import streamlit as st
5
  from profanity_check import predict
6
-
7
 
8
  st.set_page_config(page_title="Chef Medi", layout="centered")
9
  st.title("Chef Medi")
10
  st.write("Share what you want to cook...")
11
 
12
- # Chat Template
13
-
14
  chat_template = ChatPromptTemplate.from_messages([
15
- ('system',
16
- 'Your name is Medi and you are an AI five star michelin star chef who teaches cooking. Your job is to guide users and help them create delicious food. Write minimal text to teach users answer in bullet points'),
17
  MessagesPlaceholder(variable_name='chat_history'),
18
  ('human', '{user_input}')
19
  ])
20
 
21
-
22
- # History Maintenance
23
-
24
  chat_history = []
25
 
26
- # Model
27
-
28
  llm = HuggingFacePipeline.from_model_id(
29
  model_id='TinyLlama/TinyLlama-1.1B-Chat-v1.0',
30
  task='text-generation',
31
  pipeline_kwargs=dict(
32
- max_new_tokens = 512,
33
- temperature = 0.1
34
  )
35
  )
36
 
37
  model = ChatHuggingFace(llm=llm)
38
 
39
-
40
  def model_answer(user_input):
41
-
42
- chat_history.append({'role': 'user', 'content': user_input})
43
  prompt = chat_template.invoke({
44
- 'chat_history': chat_history,
45
  'user_input': user_input
46
  })
47
  result = model.invoke(prompt)
48
-
49
- match = re.search(r"<\|assistant\|>(.*)", result.content, re.DOTALL)
50
- ai_resp = match.group(1).strip()
51
- return ai_resp
52
-
53
 
54
  prompt = st.chat_input("Say something")
55
 
@@ -60,4 +45,6 @@ if prompt:
60
  ai_resp = "Please refrain from using bad language. Thanks"
61
  else:
62
  ai_resp = model_answer(prompt)
63
- st.write(f"Medi: {ai_resp}")
 
 
 
1
  from langchain_huggingface import HuggingFacePipeline, ChatHuggingFace
2
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 
 
3
  from profanity_check import predict
4
+ import streamlit as st
5
 
6
# Streamlit page scaffolding: browser-tab title, centered layout, heading, intro line.
st.set_page_config(page_title="Chef Medi", layout="centered")
st.title("Chef Medi")
st.write("Share what you want to cook...")
9
 
 
 
10
# Prompt layout: fixed system persona, then the rolling chat history,
# then the current user turn filled in at invoke time.
_template_messages = [
    ('system', 'You are Medi, a concise 5-star Michelin chef AI. Teach in bullet points.'),
    MessagesPlaceholder(variable_name='chat_history'),
    ('human', '{user_input}'),
]
chat_template = ChatPromptTemplate.from_messages(_template_messages)
15
 
 
 
 
# Module-level conversation log; model_answer reads a recent slice of it.
# NOTE(review): Streamlit reruns the script on each interaction, so this
# resets between turns — presumably intended as per-run state; verify.
chat_history = []
17
 
 
 
18
# Local TinyLlama chat model run through a transformers text-generation
# pipeline and wrapped for LangChain.
llm = HuggingFacePipeline.from_model_id(
    model_id='TinyLlama/TinyLlama-1.1B-Chat-v1.0',
    task='text-generation',
    pipeline_kwargs=dict(
        max_new_tokens=128,
        # temperature only takes effect when sampling is enabled; without
        # do_sample=True the pipeline decodes greedily and ignores it.
        do_sample=True,
        temperature=0.7,
    ),
)
26
 
27
# Chat-message adapter over the raw pipeline (applies the model's chat template).
model = ChatHuggingFace(llm=llm)
28
 
 
29
def model_answer(user_input):
    """Generate Medi's reply to *user_input* using the last few history turns.

    Builds a prompt from a bounded slice of the module-level ``chat_history``
    plus the current user message, invokes the model, and returns only the
    assistant's text.
    """
    # Slicing copies, so appending here does not mutate the global log.
    recent_history = chat_history[-3:]
    recent_history.append({'role': 'user', 'content': user_input})
    prompt = chat_template.invoke({
        'chat_history': recent_history,
        'user_input': user_input
    })
    result = model.invoke(prompt)
    # HuggingFacePipeline text-generation echoes the full prompt in the
    # output, so keep only the text after the final assistant tag (the
    # previous revision extracted this with a regex). rpartition falls back
    # to the whole content when the tag is absent.
    _, _, reply = result.content.rpartition('<|assistant|>')
    return reply.strip()
 
 
 
 
38
 
39
  prompt = st.chat_input("Say something")
40
 
 
45
  ai_resp = "Please refrain from using bad language. Thanks"
46
  else:
47
  ai_resp = model_answer(prompt)
48
+ chat_history.append({'role': 'user', 'content': prompt})
49
+ chat_history.append({'role': 'assistant', 'content': ai_resp})
50
+ st.write(f"Medi: {ai_resp}")