"""VENOM chat app: a small Streamlit front-end over a LangChain ChatOpenAI model.

Reads OPENAI_API_KEY / OPENAI_API_BASE from the environment (populated via
.env through python-dotenv) and keeps the running conversation in
st.session_state so history survives Streamlit reruns.
"""
from dotenv import load_dotenv

# Load .env before reading os.environ below.
load_dotenv()

import os
import re

import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage

# Streamlit re-executes this script on every interaction, so a plain
# module-level list would be rebuilt each rerun and the model would never
# see prior turns. Persist the history in session_state instead.
if "store" not in st.session_state:
    st.session_state["store"] = [
        SystemMessage(content="You are a Funny dengerous AI assistant VENOM, give short answers"),
    ]
store = st.session_state["store"]


def get_llama_response(messages=None):
    """Send the conversation to the chat model and return its response message.

    Args:
        messages: Optional list of LangChain messages; defaults to the shared
            session history ``store`` (kept for backward compatibility with
            the original zero-argument call).

    Returns:
        The model's response (an ``AIMessage``-like object with ``.content``).

    Raises:
        KeyError: if OPENAI_API_KEY or OPENAI_API_BASE is not set.
    """
    llm = ChatOpenAI(
        api_key=os.environ["OPENAI_API_KEY"],
        base_url=os.environ["OPENAI_API_BASE"],
        model_name="sarvamai/sarvam-m:free",
        temperature=0.7,
    )
    return llm(messages if messages is not None else store)


def get_text_after_think(text: str) -> str:
    """Strip a reasoning model's chain-of-thought, keeping text after the last
    ``</think>`` tag.

    Bug fix: the closing tag was previously an empty string, and
    ``text.rfind("")`` returns ``len(text)``, so the function always returned
    "". It now searches for the literal ``</think>`` delimiter.

    Args:
        text: Raw model output, possibly containing ``<think>...</think>``.

    Returns:
        Everything after the final ``</think>`` (stripped), or the whole
        stripped text when no tag is present.
    """
    end_tag = "</think>"
    idx = text.rfind(end_tag)
    if idx != -1:
        # Keep everything after the last closing tag.
        return text[idx + len(end_tag):].strip()
    return text.strip()


st.set_page_config(page_title="I am VeNoM ", page_icon="🕷️")
st.header("VENOM CHAT-APP")

user_input = st.text_input("Ask me anything", key="input")
submit = st.button("Ask Karo")

# Guard against submitting an empty question (previously an empty
# HumanMessage was appended and sent to the model).
if submit and user_input:
    st.subheader("The response is:")
    store.append(HumanMessage(content=user_input))
    response = get_text_after_think(get_llama_response().content)
    store.append(AIMessage(content=response))
    st.write(response)