Naveen934 commited on
Commit
d8fbad4
·
verified ·
1 Parent(s): 9aa2472

Upload 3 files

Browse files
Files changed (3) hide show
  1. Dockerfile +28 -0
  2. app.py +108 -0
  3. requirements.txt +9 -0
Dockerfile ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use a smaller base image
FROM python:3.9-slim

# Install system dependencies needed to build Python wheels (gcc/libffi),
# then clean apt caches in the same layer to keep the image small.
RUN apt-get update && apt-get install -y --no-install-recommends \
    gcc \
    make \
    libffi-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Set working directory
WORKDIR /app

# Copy only requirements first so dependency installation is cached
# independently of app-code changes
COPY requirements.txt .

# Install dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the app files
COPY app.py .

# Expose the port that Streamlit runs on
EXPOSE 8501

# Run the app. Bind explicitly to 0.0.0.0 and the exposed port so the
# server is reachable from outside the container (not just localhost).
CMD ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"]
app.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import google.generativeai as genai
3
+ from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
4
+ from langchain_google_genai import ChatGoogleGenerativeAI
5
+ from langchain_core.prompts import PromptTemplate
6
+ import os
7
+
8
# Streamlit page configuration
st.set_page_config(page_title="AI Tool", page_icon=":robot:")
st.title("GPT Clone")
st.sidebar.title("Select your LLM Model")

# Sidebar to select the model; `model` is read by get_api_key() and
# get_llm_response() below as a module-level global.
model = st.sidebar.selectbox("Please select any model:",
                             ("Gemini", "Mistral", "Llama"),
                             placeholder="Select your LLM model...")

st.write("Your LLM Model is:", model)

# Initialize API key state so the key survives Streamlit reruns.
if "api_key" not in st.session_state:
    st.session_state["api_key"] = ''
23
+
24
# Function to get API key input from the user
def get_api_key():
    """Render a sidebar password field for the selected provider's API key.

    Reads the module-level ``model`` selection to decide which label to
    show, stores the entered key in ``st.session_state["api_key"]``, and
    returns it.
    """
    label = (
        "Please enter your Gemini API key"
        if model == "Gemini"
        else "Please enter your HuggingFace API key"
    )
    st.session_state["api_key"] = st.sidebar.text_input(label, type='password')
    return st.session_state["api_key"]
31
+
32
# Function to interact with HuggingFace models
def invoke_hugging_llm(model_name, api_key, prompt):
    """Send *prompt* to a HuggingFace-hosted model and return its reply.

    The HuggingFace client reads the token from the environment, so the
    key is exported before the endpoint is constructed.
    """
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = api_key
    endpoint = HuggingFaceEndpoint(repo_id=model_name)
    return endpoint.invoke(prompt)
38
+
39
import logging

# DEBUG level makes the prompt/response traces in get_llm_response visible.
# NOTE(review): consider INFO for production use so secrets/prompts are not
# written to logs by default.
logging.basicConfig(level=logging.DEBUG)
41
+
42
def get_llm_response(api_key, prompt):
    """Route *prompt* to the model selected in the sidebar and return its reply.

    Reads the module-level ``model`` selection. For "Mistral"/"Llama" the
    prompt is sent via :func:`invoke_hugging_llm`; for "Gemini" it goes
    through ``ChatGoogleGenerativeAI`` and the response text (``.content``)
    is returned.

    Raises:
        ValueError: if the current ``model`` selection is not recognized
            (previously this fell through to an unbound local name).
        Exception: re-raised from the underlying client on any failure.
    """
    # Never log the raw API key — redact all but a short suffix.
    logging.debug("Using API key: ****%s", api_key[-4:] if api_key else "")
    logging.debug("Prompt: %s", prompt)
    try:
        if model == "Mistral":
            return invoke_hugging_llm(
                "mistralai/Mistral-7B-Instruct-v0.3", api_key, prompt)
        elif model == "Llama":
            return invoke_hugging_llm(
                "meta-llama/Meta-Llama-3-8B-Instruct", api_key, prompt)
        elif model == "Gemini":
            os.environ['GOOGLE_API_KEY'] = api_key
            genai.configure(api_key=os.environ['GOOGLE_API_KEY'])
            llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
            response = llm.invoke(prompt)
            logging.debug("Gemini response: %s", response)
            return response.content
        # Guard against an unexpected selection instead of a NameError.
        raise ValueError(f"Unknown model selection: {model!r}")
    except Exception:
        logging.exception("Error invoking model")
        raise  # bare raise preserves the original traceback
63
+
64
# Get API key (renders the sidebar input and caches in session state)
api_key = get_api_key()

# Display success message if API key is provided
if api_key:
    st.success("API Key Acquired")

# Text input for user's question
question = st.text_input("Ask your question")

# Button to submit the question
button2 = st.button("Submit")

# Search tool integration (DuckDuckGo)
# NOTE(review): mid-file imports — conventionally these belong at the top
# of the module with the other imports.
from phi.assistant import Assistant
from phi.tools.duckduckgo import DuckDuckGo

# Initialize the search tool (re-created on every Streamlit rerun)
search_tool = Assistant(tools=[DuckDuckGo()], show_tool_calls=True)

# Check if a question has been entered; the search runs on every rerun
# once `question` is non-empty, not only when Submit is clicked.
search_result = None
if question:
    try:
        search_result = search_tool.run(question)  # Adjusted from 'print_response' to 'run'
    except Exception as e:
        st.error(f"Error performing search: {str(e)}")
        search_result = "No search results found."

# Create the prompt using the search result.
# NOTE(review): when no question has been entered yet, this formats the
# template with question="" and search=None — harmless because the prompt
# is only sent when `search_result` is truthy below.
template = """You are an AI assistant. Provide relevant answers to the user's question.
The user's question is: {question}.
If the user asks about current affairs, use the DuckDuckGo search result as context.
The DuckDuckGo search result is: {search}"""

example_prompt = PromptTemplate(input_variables=["question", "search"], template=template)
prompt = example_prompt.format(question=question, search=search_result)

# If the submit button is clicked, get the response from the selected model
if button2:
    if search_result:
        response = get_llm_response(st.session_state["api_key"], prompt)
        st.write(response)
    else:
        st.warning("Please enter a valid question to search.")
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ langchain_community==0.3.0
2
+ langchain_core==0.3.5
3
+ langchain_google_genai==2.0.0
4
+ phi==0.6.7
5
+ phidata==2.4.20
6
+ protobuf>=3.20,<6
7
+ streamlit==1.37.0
8
+ duckduckgo_search==6.2.11b1
9
+ huggingface_hub==0.25.1