Upload 3 files
- Dockerfile +25 -0
- app.py +16 -0
- requirements.txt +5 -0
Dockerfile
ADDED
@@ -0,0 +1,25 @@
+# Use Python 3.12 slim image
+FROM python:3.12-slim
+
+# Set working directory
+WORKDIR /app
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    build-essential \
+    && rm -rf /var/lib/apt/lists/*
+
+# Copy requirements first to leverage Docker cache
+COPY requirements.txt .
+
+# Install Python dependencies
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy the rest of the application
+COPY . .
+
+# Expose the port the app runs on
+EXPOSE 8000
+
+# Command to run the application
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]
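The COPY requirements.txt step before COPY . . is what lets Docker cache the dependency layer across code changes. Note that nothing in the image pre-fetches the Qwen2.5-VL-7B-Instruct weights, so the container downloads them from the Hugging Face Hub on first startup. If the weights were to be baked into the image instead, one option (not part of this commit) would be a small warm-up script run during the build, assuming huggingface_hub, which is installed as a dependency of transformers:

# warmup.py -- hypothetical helper, not part of this commit.
# Pre-downloads the checkpoint into the image's Hugging Face cache so the
# container does not have to fetch the weights on first use.
from huggingface_hub import snapshot_download

snapshot_download(repo_id="Qwen/Qwen2.5-VL-7B-Instruct")

It would be invoked with an extra RUN python warmup.py step after the pip install, at the cost of a much larger image.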
app.py
ADDED
@@ -0,0 +1,16 @@
+from fastapi import FastAPI
+from transformers import pipeline
+
+app = FastAPI()
+
+# Initialize the pipeline with the correct model type
+pipe = pipeline("text-generation", model="Qwen/Qwen2.5-VL-7B-Instruct", model_type="qwen2_5_vl")
+
+@app.get("/")
+def home():
+    return {"message": "FastAPI is running!"}
+
+@app.get("/generate")
+def generate(text: str):
+    output = pipe(text)
+    return {"output": output[0]['generated_text']}
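Note: as committed, the pipeline setup in app.py is unlikely to work. transformers' pipeline() takes no model_type argument, and Qwen/Qwen2.5-VL-7B-Instruct is a vision-language model that recent transformers releases serve through the "image-text-to-text" task rather than "text-generation". A minimal sketch of a text-only call that fits the transformers>=4.50.0 pin in requirements.txt could look like the following; this is an assumption about the intended behaviour, not part of the commit.

# Hypothetical sketch of calling Qwen2.5-VL through the image-text-to-text
# pipeline -- an assumption, not part of this commit.
from transformers import pipeline

pipe = pipeline("image-text-to-text", model="Qwen/Qwen2.5-VL-7B-Instruct")

# Text-only prompt in chat format; an image entry could be added to the content list later.
messages = [{"role": "user", "content": [{"type": "text", "text": "Describe FastAPI in one sentence."}]}]
result = pipe(text=messages, max_new_tokens=64)
# The reply is typically found under result[0]["generated_text"].
print(result)

Bear in mind that loading a 7B-parameter checkpoint on the CPU-only python:3.12-slim image requires tens of gigabytes of RAM.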
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+fastapi==0.110.0
+transformers>=4.50.0
+torch>=2.6.0
+uvicorn==0.27.1
+python-multipart==0.0.9
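Once the image is built and a container is running with port 8000 published (for example docker build -t qwen-api . followed by docker run -p 8000:8000 qwen-api, where qwen-api is just an illustrative tag), the two endpoints can be exercised from the host. A minimal sketch using only the Python standard library, assuming the service is reachable at localhost:8000:

# client.py -- hypothetical usage sketch, assuming the container is published on localhost:8000.
import json
from urllib.parse import urlencode
from urllib.request import urlopen

# Health check against the root endpoint defined in app.py.
print(json.load(urlopen("http://localhost:8000/")))

# Call /generate with the prompt passed as a query parameter, as the app.py handler expects.
query = urlencode({"text": "Hello, world"})
print(json.load(urlopen("http://localhost:8000/generate?" + query)))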