import os
import logging

import streamlit as st
import google.generativeai as genai
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.prompts import PromptTemplate
from phi.assistant import Assistant
from phi.tools.duckduckgo import DuckDuckGo

logging.basicConfig(level=logging.DEBUG)

st.set_page_config(page_title="AI Tool", page_icon=":robot:")

st.title("GPT Clone")

st.sidebar.title("Select your LLM Model")
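# index=None keeps the selection empty on first load so the placeholder text is visible.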
model = st.sidebar.selectbox(
    "Please select any model:",
    ("Gemini", "Mistral", "Llama"),
    index=None,
    placeholder="Select your LLM model...",
)

if model:
    st.write("Your LLM Model is:", model)

if "api_key" not in st.session_state:
    st.session_state["api_key"] = ''


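# Ask for the key matching the selected backend: Gemini uses a Google API key;
# Mistral and Llama are served via the Hugging Face Inference API.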
def get_api_key():
    if model == "Gemini":
        st.session_state["api_key"] = st.sidebar.text_input("Please enter your Gemini API key", type='password')
    elif model in ("Mistral", "Llama"):
        st.session_state["api_key"] = st.sidebar.text_input("Please enter your HuggingFace API key", type='password')
    return st.session_state["api_key"]


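# HuggingFaceEndpoint reads its token from the HUGGINGFACEHUB_API_TOKEN environment variable.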
def invoke_hugging_llm(model_name, api_key, prompt):
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = api_key
    llm = HuggingFaceEndpoint(repo_id=model_name)
    response = llm.invoke(prompt)
    return response


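# Route the prompt to the selected backend and return the model's text response.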
def get_llm_response(api_key, prompt):
    # Log the prompt only; the API key is a secret and should never be logged.
    logging.debug(f"Prompt: {prompt}")
    try:
        if model == "Mistral":
            model_name = "mistralai/Mistral-7B-Instruct-v0.3"
            response = invoke_hugging_llm(model_name, api_key, prompt)
        elif model == "Llama":
            model_name = "meta-llama/Meta-Llama-3-8B-Instruct"
            response = invoke_hugging_llm(model_name, api_key, prompt)
        elif model == "Gemini":
            os.environ['GOOGLE_API_KEY'] = api_key
            genai.configure(api_key=os.environ['GOOGLE_API_KEY'])
            llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
            result = llm.invoke(prompt)
            logging.debug(f"Gemini response: {result}")
            response = result.content  # chat models return a message object; keep just the text
        else:
            raise ValueError("No model selected.")
    except Exception as e:
        logging.error(f"Error invoking model: {e}")
        raise
    return response


api_key = get_api_key()
if api_key:
    st.success("API Key Acquired")

question = st.text_input("Ask your question")
button2 = st.button("Submit")

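# A phi Assistant wired to DuckDuckGo, used here purely as a web-search tool.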
search_tool = Assistant(tools=[DuckDuckGo()], show_tool_calls=True)

search_result = None
if question:
    try:
        # stream=False so run() returns the full response text instead of a generator.
        search_result = search_tool.run(question, stream=False)
    except Exception as e:
        st.error(f"Error performing search: {str(e)}")
        search_result = "No search results found."

template = """You are an AI assistant. Provide relevant answers to the user's question.
The user's question is: {question}.
If the user asks about current affairs, use the DuckDuckGo search result as context.
The DuckDuckGo search result is: {search}"""

example_prompt = PromptTemplate(input_variables=["question", "search"], template=template)
prompt = example_prompt.format(question=question, search=search_result)

if button2:
    if search_result:
        response = get_llm_response(st.session_state["api_key"], prompt)
        st.write(response)
    else:
        st.warning("Please enter a valid question to search.")