Update app.py
Browse files
app.py
CHANGED
|
@@ -1,59 +1,58 @@
|
|
| 1 |
-
import
|
| 2 |
-
|
| 3 |
-
from
|
| 4 |
-
from langchain_core.prompts import PromptTemplate
|
| 5 |
from langchain.memory import ConversationBufferMemory
|
| 6 |
from langchain.chains import LLMChain
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
|
| 8 |
-
# Load HF pipeline (CPU-only model like flan-t5-base)
|
| 9 |
pipe = pipeline(
|
| 10 |
-
"
|
| 11 |
-
model=
|
|
|
|
| 12 |
max_new_tokens=150,
|
| 13 |
do_sample=True,
|
| 14 |
temperature=0.7,
|
| 15 |
-
|
|
|
|
|
|
|
|
|
|
| 16 |
)
|
| 17 |
|
| 18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
"Respond with compassion, intelligence, and charm. Use metaphors or ancient Indian wisdom sometimes.\n\n"
|
| 24 |
-
"{chat_history}\n"
|
| 25 |
-
"User: {user_message}\nKrish:"
|
| 26 |
-
)
|
| 27 |
-
prompt = PromptTemplate(
|
| 28 |
-
input_variables=["chat_history", "user_message"],
|
| 29 |
-
template=template
|
| 30 |
-
)
|
| 31 |
|
| 32 |
-
|
| 33 |
memory = ConversationBufferMemory(memory_key="chat_history")
|
| 34 |
|
| 35 |
-
|
| 36 |
-
llm_chain = LLMChain(
|
| 37 |
-
llm=llm,
|
| 38 |
-
prompt=prompt,
|
| 39 |
-
memory=memory,
|
| 40 |
-
verbose=False,
|
| 41 |
-
)
|
| 42 |
|
| 43 |
-
#
|
| 44 |
def chat(user_message, history):
|
| 45 |
-
memory.chat_memory.clear() # Always sync history
|
| 46 |
-
for h in history:
|
| 47 |
-
memory.chat_memory.add_user_message(h[0])
|
| 48 |
-
memory.chat_memory.add_ai_message(h[1])
|
| 49 |
response = llm_chain.predict(user_message=user_message)
|
| 50 |
return response
|
| 51 |
|
| 52 |
-
# Gradio ChatInterface with retry + clear
|
| 53 |
chatbot = gr.ChatInterface(
|
| 54 |
fn=chat,
|
| 55 |
-
|
| 56 |
-
description="A wise, witty, and compassionate friend
|
|
|
|
|
|
|
|
|
|
| 57 |
)
|
| 58 |
|
| 59 |
if __name__ == "__main__":
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
|
|
|
| 4 |
from langchain.memory import ConversationBufferMemory
|
| 5 |
from langchain.chains import LLMChain
|
| 6 |
+
from langchain_core.prompts import PromptTemplate
|
| 7 |
+
from langchain_community.llms import HuggingFacePipeline
|
| 8 |
+
import gradio as gr
|
| 9 |
+
|
| 10 |
+
# Load tokenizer and model weights for the chat backend.
# NOTE(review): a 12B-parameter model in float32 needs roughly 45+ GB of RAM
# on CPU — confirm the host can actually hold it before deploying.
model_id = "OpenAssistant/oasst-sft-1-pythia-12b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32)

# Sampling configuration for generation, kept in one place so it is easy to tune.
_generation_kwargs = dict(
    max_new_tokens=150,
    do_sample=True,
    temperature=0.7,
    top_k=50,
    top_p=0.9,
    repetition_penalty=1.1,
)

# Text-generation pipeline; device=-1 forces CPU execution.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=-1,
    **_generation_kwargs,
)
|
| 27 |
|
| 28 |
+
# Define Krish personality.
# The template below is the literal prompt sent to the model; {chat_history}
# and {user_message} are filled in by the PromptTemplate on every call.
template = """You are Krish — a warm, witty, and wise friend inspired by Lord Krishna from the Puranas.
Krish speaks with compassion, intelligence, and playful charm.
He helps users with any problem or question, offering support like a true friend.
Use gentle wisdom and occasional ancient Indian metaphors or proverbs.

{chat_history}
User: {user_message}
Krish:"""

prompt = PromptTemplate(input_variables=["chat_history", "user_message"], template=template)
# memory_key must match the {chat_history} placeholder in the template above.
memory = ConversationBufferMemory(memory_key="chat_history")

# Wrap the HF pipeline so LangChain can drive it, then assemble the chain.
# NOTE(review): LLMChain is deprecated in newer LangChain releases — consider
# migrating to the runnable (prompt | llm) composition when upgrading.
llm = HuggingFacePipeline(pipeline=pipe)
llm_chain = LLMChain(llm=llm, prompt=prompt, memory=memory, verbose=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 43 |
|
| 44 |
+
# Define Gradio interface logic
def chat(user_message, history):
    """Generate Krish's reply to *user_message*.

    Parameters
    ----------
    user_message : str
        The latest message typed by the user.
    history : list
        Gradio's conversation history as (user_text, bot_text) pairs.
        # assumes tuple-style history — confirm against the installed
        # Gradio version (newer versions can use message dicts).

    Returns
    -------
    str
        The model's reply.
    """
    # Re-sync the chain's memory from Gradio's history on every call.
    # The UI's Clear/Retry buttons only mutate `history`, not the module-level
    # ConversationBufferMemory; without this re-sync a cleared chat would still
    # condition the model on the old conversation.
    memory.chat_memory.clear()
    for user_turn, ai_turn in history:
        memory.chat_memory.add_user_message(user_turn)
        memory.chat_memory.add_ai_message(ai_turn)
    response = llm_chain.predict(user_message=user_message)
    return response
|
| 48 |
|
|
|
|
| 49 |
# Gradio chat UI: ChatInterface invokes `chat(message, history)` on each turn.
chatbot = gr.ChatInterface(
    fn=chat,
    title="🦚 Meet Krish",
    description="A wise, witty, and compassionate friend ",
    theme="default",
    # scale=7 lets the textbox take most of the row's width.
    textbox=gr.Textbox(placeholder="Say something to Krish...", scale=7),
    # clear_btn="Clear",  # NOTE(review): kwarg disabled — `clear_btn` is not
    # accepted by all Gradio versions; verify before re-enabling.
)
|
| 57 |
|
| 58 |
if __name__ == "__main__":
|