Upload 3 files
Browse files- Dockerfile +13 -0
- main.py +40 -0
- requirements.txt +3 -0
Dockerfile
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Slim Python base keeps the image small; 3.10 matches the app's tested runtime.
FROM python:3.10-slim

WORKDIR /app

# Install dependencies first so this layer is cached unless requirements.txt changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application files (after deps, to keep the build cache effective)
COPY . .

# Document the port the app listens on (7860 is mandatory for Hugging Face Spaces)
EXPOSE 7860

# Bind to 0.0.0.0 so the server is reachable from outside the container;
# --headless prevents Chainlit from trying to open a browser.
CMD ["chainlit", "run", "main.py", "--host", "0.0.0.0", "--port", "7860", "--headless"]
|
main.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Import required libraries
import os  # environment variable access
import chainlit as cl  # chatbot UI framework
import google.generativeai as genai  # Google Gemini API client
from dotenv import load_dotenv  # loading variables from a local .env file

# Load environment variables from a .env file (no-op if the file is absent).
load_dotenv()

# Read the Gemini API key and fail fast with a clear message instead of
# letting the first API call die later with an opaque authentication error.
gemini_api_key = os.getenv("GEMINI_API_KEY")
if not gemini_api_key:
    raise RuntimeError(
        "GEMINI_API_KEY is not set; add it to your environment or .env file."
    )

# Configure the Gemini client with the API key.
genai.configure(api_key=gemini_api_key)

# Single shared model instance, reused across all chat sessions.
model = genai.GenerativeModel(model_name="gemini-2.0-flash")
# chainlit decorator for when a new chat session starts
@cl.on_chat_start
async def handle_chat_start():
    """Greet the user at the start of every new chat session."""
    greeting = cl.Message(content="Hello! how can I help you today?")
    await greeting.send()
| 25 |
+
|
| 26 |
+
|
# chainlit decorator for when a new message is received
@cl.on_message
async def handle_message(message: cl.Message):
    """Forward the user's message to Gemini and relay the model's reply.

    Failures are reported back in chat instead of crashing the session.
    NOTE: the previous ``hasattr(response, "text")`` guard was ineffective —
    ``hasattr`` only swallows AttributeError, while the google-generativeai
    ``response.text`` property raises ValueError when the response was
    blocked by safety filters or contains no text part.
    """
    # Get the message content from the user.
    prompt = message.content

    try:
        # Generate a response using the shared Gemini model.
        response = model.generate_content(prompt)
        # Accessing .text may raise ValueError for blocked/empty responses.
        response_text = response.text
    except Exception as exc:  # boundary handler: keep the chat session alive
        response_text = f"Sorry, something went wrong: {exc}"

    # Send the response back to the user.
    await cl.Message(content=response_text).send()
requirements.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
chainlit
|
| 2 |
+
google-generativeai
|
| 3 |
+
python-dotenv
|