File size: 1,548 Bytes
b8e4aa0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
from langchain_huggingface import HuggingFacePipeline, ChatHuggingFace
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import re
import streamlit as st
from profanity_check import predict



# Chat Template

# Prompt layout: a fixed system persona, followed by the running
# conversation history, followed by the newest user message.
_prompt_messages = [
    ('system',
     'Your name is Medi and you are an AI five star michelin star chef who teaches cooking. Your job is to guide users and help them create delicious food. Write minimal text to teach users answer in bullet points'),
    MessagesPlaceholder(variable_name='chat_history'),
    ('human', '{user_input}'),
]
chat_template = ChatPromptTemplate.from_messages(_prompt_messages)


# History Maintenance

# Shared conversation history: a list of {'role': ..., 'content': ...} dicts
# appended to by model_answer on every turn.
chat_history = []

# Model

# TinyLlama chat model exposed through a HuggingFace text-generation
# pipeline, then wrapped as a LangChain chat interface.
llm = HuggingFacePipeline.from_model_id(
    model_id='TinyLlama/TinyLlama-1.1B-Chat-v1.0',
    task='text-generation',
    pipeline_kwargs={'max_new_tokens': 512},
)

model = ChatHuggingFace(llm=llm)


def model_answer(user_input):
    """Generate Medi's reply to *user_input* and update the conversation.

    The user message is appended to the module-level ``chat_history`` before
    building the prompt, and the assistant's reply is appended afterwards so
    the next turn sees both sides of the conversation.

    Args:
        user_input: The raw text the user typed.

    Returns:
        The assistant's reply text, stripped of the chat-template scaffolding.
    """
    chat_history.append({'role': 'user', 'content': user_input})
    prompt = chat_template.invoke({
        'chat_history': chat_history,
        'user_input': user_input
    })
    result = model.invoke(prompt)

    # The TinyLlama chat template echoes the whole conversation; keep only
    # the text after the final assistant marker.  Bug fix: the original
    # called match.group(1) unconditionally, raising AttributeError whenever
    # the marker was absent — fall back to the raw content instead.
    match = re.search(r"<\|assistant\|>(.*)", result.content, re.DOTALL)
    ai_resp = match.group(1).strip() if match else result.content.strip()

    # Bug fix: record the assistant turn too, so chat_history actually
    # contains the full dialogue for subsequent prompts (previously only
    # user messages were stored).
    chat_history.append({'role': 'assistant', 'content': ai_resp})
    return ai_resp


# Streamlit chat loop: read one user message, screen it for profanity, and
# either admonish the user or answer via the model.
prompt = st.chat_input("Say something")

if prompt:
    st.write(f"You: {prompt}")
    # predict returns an array-like of 0/1 labels, one per input string.
    # Bug fix: index the first label rather than comparing the whole array
    # to a list (`offensive == [1]`), which only worked via an elementwise
    # numpy comparison happening to be truthy for a single element.
    offensive = predict([prompt])
    if offensive[0] == 1:
        # Fixed grammar in the user-facing message ("from use" -> "from using").
        ai_resp = "Please refrain from using bad language. Thanks"
    else:
        ai_resp = model_answer(prompt)
    st.write(f"Medi: {ai_resp}")