from langchain_huggingface import HuggingFacePipeline, ChatHuggingFace
from langchain_core.prompts import ChatPromptTemplate
import streamlit as st
from profanity_check import predict
import re


st.title("Chef Medi")
st.write("Share what you want to cook...")


# Chat Template

chat_template = ChatPromptTemplate.from_messages([
    ('system', 
     'Your name is Medi and you are an AI five-star Michelin chef who teaches cooking. Your job is to guide users and help them create delicious food. Keep your answers brief and respond in bullet points.'),
    ('human', '{user_input}')
])


# Model

llm = HuggingFacePipeline.from_model_id(
    model_id='TinyLlama/TinyLlama-1.1B-Chat-v1.0',
    task='text-generation',
    pipeline_kwargs=dict(
        max_new_tokens=512,
        do_sample=False,   # greedy decoding; top_p/temperature below are left at their defaults
        top_p=1.0,
        temperature=1.0
    )
)

# Wrap the pipeline in a chat model so the prompt messages are formatted with TinyLlama's chat template
model = ChatHuggingFace(llm=llm)


def model_answer(user_input, chat_template):

    # Pipe the prompt template into the chat model
    chain = chat_template | model

    result = chain.invoke({
        'user_input': user_input
    })

    # The pipeline echoes the full prompt, so keep only the text after the assistant tag;
    # fall back to the raw output if the tag is missing
    match = re.search(r"<\|assistant\|>(.*)", result.content, re.DOTALL)
    ai_resp = match.group(1).strip() if match else result.content.strip()
    return ai_resp


prompt = st.chat_input("Say something")

if prompt:
    st.write(f"You: {prompt}")
    # Screen the message for profanity before sending it to the model
    offensive = predict([prompt])
    if offensive[0] == 1:
        ai_resp = "Please refrain from using bad language. Thanks"
    else:
        ai_resp = model_answer(prompt, chat_template)
    st.write(f"Medi: {ai_resp}")