# from langchain_ollama import ChatOllama
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
from dotenv import load_dotenv
import uuid
import gradio as gr
load_dotenv()  # Load environment variables (e.g. the Google API key) from a .env file

# In-memory map of session_id -> InMemoryChatMessageHistory.
# Lives only for the lifetime of this process; nothing is persisted.
store = {}
def get_session_history(session_id):
    """
    Return the chat history for *session_id*, creating an empty one on first use.

    Histories live in the module-level ``store`` dict; replace this lookup with
    a database-backed one if persistence across restarts is ever needed.
    """
    history = store.get(session_id)
    if history is None:
        history = InMemoryChatMessageHistory()
        store[session_id] = history
    return history
# Chat model. A local Ollama llama3.2 model was used previously; kept for reference:
# llm = ChatOllama(model="llama3.2:latest", temperature=2.0)
llm = ChatGoogleGenerativeAI(
    model="gemini-2.0-flash"
)

# Prompt layout: fixed system persona, then the running conversation history,
# then the latest user message. The "history" placeholder is filled in by
# RunnableWithMessageHistory below.
prompt = ChatPromptTemplate.from_messages(
    messages=[
        ("system", "You are a friendly and helpful assistant named Zenbot. Start by greeting the user and then answer their questions."),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{input}")
    ]
)

# Wrap prompt | llm so the model remembers previous turns within a session:
# - input_messages_key:   key in the invoke() dict carrying the new user message
# - history_messages_key: prompt variable that receives the prior messages
chain = RunnableWithMessageHistory(
    runnable=prompt | llm,
    get_session_history=get_session_history,
    input_messages_key="input",
    history_messages_key="history",
)
def chatbot(user_input, history_state, temprature_slider, session_id=None):
    """
    Handle one user turn and return the session transcript for display.

    Args:
        user_input: The user's question; whitespace-only input is rejected.
        history_state: Gradio state list of (question, answer) pairs; created
            on first call and appended to for bookkeeping.
        temprature_slider: Sampling temperature applied to the shared ``llm``.
        session_id: Key into the history ``store``. Gradio supplies a
            per-app-session value; when omitted a fresh UUID is generated
            per call.

    Returns:
        str: A validation message for empty input, otherwise the stringified
        message history for this session.
    """
    # Fix: the old default `session_id=str(uuid.uuid4())` was evaluated once
    # at import time, so every caller relying on the default shared a single
    # session. Use a None sentinel and generate a fresh id per call instead.
    if session_id is None:
        session_id = str(uuid.uuid4())
    if not user_input.strip():
        return "Please enter a valid question."
    # NOTE(review): this mutates the one shared model instance, so concurrent
    # sessions see whichever temperature was set last — confirm acceptable.
    llm.temperature = temprature_slider
    response = chain.invoke(
        {"input": user_input},
        config={"configurable": {"session_id": session_id}},
    ).content
    if history_state is None:
        history_state = []
    history_state.append((user_input, response))
    # Show the whole session transcript, not just the latest answer.
    return str(store[session_id])
def clear(session_id):
    """Reset the backend chat history for *session_id* to an empty one."""
    print("Clearing conversation history")
    fresh_history = InMemoryChatMessageHistory()
    store[session_id] = fresh_history
# ---- Gradio UI ----
with gr.Blocks() as demo:
    gr.Markdown("Zensar chatbot")
    # Per-browser-session list of (question, answer) pairs
    history_state = gr.State(value=None)
    # NOTE(review): uuid.uuid4() runs once at app startup, so every visitor
    # shares the same session_id (and therefore the same chat history) —
    # consider generating it per user in a demo.load event. TODO confirm.
    session_id = gr.State(value=str(uuid.uuid4()))
    input_box = gr.Textbox(label="Ask a question", placeholder="Type your question here...")
    # Sampling temperature forwarded to the model on every submit
    temprature_slider = gr.Slider(
        label="Temperature",
        minimum=0.0,
        maximum=1.0,
        value=0.5,
        step=0.1,
        interactive=True,
    )
    output_box = gr.Textbox(label="Answer", interactive=False)
    submit_button = gr.Button("Submit")
    # ClearButton resets these UI components; the backend history is cleared
    # separately via the click handler below.
    clear_button = gr.ClearButton(components=[input_box, output_box, temprature_slider])
    submit_button.click(
        fn=chatbot,
        inputs=[input_box, history_state, temprature_slider, session_id],
        outputs=output_box
    )
    clear_button.click(
        fn=clear,
        inputs=[session_id],
    )

# Launch the Gradio app
demo.launch()