RayanAli committed · verified · Commit d708e2f · 1 parent: 13fd3f0

Update app.py

Files changed (1)
  1. app.py +18 -34
app.py CHANGED
@@ -1,45 +1,29 @@
- from langchain_huggingface import HuggingFacePipeline, ChatHuggingFace
- from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
- from profanity_check import predict
  import streamlit as st
- import re
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ from profanity_check import predict
+

- st.set_page_config(page_title="Chef Medi", layout="centered")
  st.title("Chef Medi")
  st.write("Share what you want to cook...")

- chat_template = ChatPromptTemplate.from_messages([
-     ('system', 'You are Medi, a concise 5-star Michelin chef AI. Teach in bullet points.'),
-     MessagesPlaceholder(variable_name='chat_history'),
-     ('human', '{user_input}')
- ])
+ prompt = st.chat_input("Say something")

  chat_history = []

- llm = HuggingFacePipeline.from_model_id(
-     model_id='TinyLlama/TinyLlama-1.1B-Chat-v1.0',
-     task='text-generation',
-     pipeline_kwargs=dict(
-         max_new_tokens=128,
-         temperature=0.7
-     )
- )
+ model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16)

- model = ChatHuggingFace(llm=llm)
-
- def model_answer(user_input):
-     recent_history = chat_history[-3:]
-     recent_history.append({'role': 'user', 'content': user_input})
-     prompt = chat_template.invoke({
-         'chat_history': recent_history,
-         'user_input': user_input
-     })
-     result = model.invoke(prompt)
-     match = re.search(r"<\|assistant\|>(.*)", result.content, re.DOTALL)
-     ai_resp = match.group(1).strip()
-     return ai_resp
-
- prompt = st.chat_input("Say something")
+ def generate_reply(prompt):
+     chat_history.append(f"User: {prompt}")
+     prompt = "You are Medi, a 5-star chef. Be brief and answer in bullet points.\n" + "\n".join(chat_history) + "\nAssistant:"
+     inputs = tokenizer(prompt, return_tensors="pt")
+     outputs = model.generate(**inputs, max_new_tokens=1024, do_sample=False)
+     full_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     reply = full_output.split("Assistant:")[-1].strip()
+     chat_history.append(f"Assistant: {reply}")
+     return reply

  if prompt:
      st.write(f"You: {prompt}")
@@ -47,7 +31,7 @@ if prompt:
      if offensive == [1]:
          ai_resp = "Please refrain from using bad language. Thanks"
      else:
-         ai_resp = model_answer(prompt)
+         ai_resp = generate_reply(prompt)
      chat_history.append({'role': 'user', 'content': prompt})
      chat_history.append({'role': 'assistant', 'content': ai_resp})
      st.write(f"Medi: {ai_resp}")