esteele commited on
Commit
c954778
·
1 Parent(s): 283c634

Initial Commit

Browse files
Dockerfile ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Single-stage image for the FastAPI app.
# NOTE: the previous version had a dead first stage (FROM ubuntu:latest) whose
# LABEL was discarded, and a trailing ENTRYPOINT ["top", "-b"] that turned the
# CMD into arguments to `top`, so the server never started. Both removed.
FROM python:3.13
LABEL authors="Carla"

WORKDIR /code

# Install dependencies first so this layer is cached across code-only changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

# Expose FastAPI on port 7860 (required for Spaces)
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app/__init__.py ADDED
File without changes
app/api/__init__.py ADDED
File without changes
app/api/v1/__init__.py ADDED
File without changes
app/api/v1/captions.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import APIRouter
from starlette.responses import JSONResponse
from app.services.ai_service import generate_captions


router = APIRouter()


@router.get("/captions")
async def get_captions(prompt: str):
    """
    Generate meme captions for a text prompt.

    Example: /api/v1/captions?prompt=My+dog+is+funny

    Returns a JSON object {"captions": [...]} on success, or a 500
    JSON error payload if caption generation fails.
    """
    try:
        captions = generate_captions(prompt)
        return {"captions": captions}
    except Exception as e:
        # Surface model/runtime failures as a 500 instead of crashing the worker.
        return JSONResponse(content={"error": str(e)}, status_code=500)
app/api/v1/upload.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import APIRouter, UploadFile, File
from starlette.responses import JSONResponse

from app.utils.file_utils import save_upload_file

router = APIRouter()


@router.post("/upload")
async def upload_file(file: UploadFile = File(...)):
    """Persist an uploaded file to disk and report where it was stored."""
    try:
        saved_path = save_upload_file(file)
    except Exception as e:
        # Any failure while writing to disk becomes a 500 JSON error response.
        return JSONResponse(content={"error": str(e)}, status_code=500)
    return JSONResponse(content={"message": "File uploaded", "path": saved_path})
app/core/__init__.py ADDED
File without changes
app/core/config.py ADDED
File without changes
app/main.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware

from .api.v1 import upload, captions

app = FastAPI(title="AI Meme Generator")

# Register routes
for module, tag in ((upload, "upload"), (captions, "captions")):
    app.include_router(module.router, prefix="/api/v1", tags=[tag])


@app.get("/")
async def read_root():
    """Simple health-check endpoint."""
    return {"message": "AI Meme Generator is running 🚀"}


# ✅ Enable CORS so frontend can talk to backend
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:3000"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
app/models/__init__.py ADDED
File without changes
app/models/meme.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
from pydantic import BaseModel


class Meme(BaseModel):
    """A generated meme: the caption text and the image it belongs to."""

    caption: str
    image_url: str


class CaptionRequest(BaseModel):
    """Request payload for caption generation."""

    prompt: str
    # Maximum token length passed through to the generator.
    max_length: int = 50
    # How many caption variants to return.
    num_return_sequences: int = 3
app/services/__init__.py ADDED
File without changes
app/services/ai_service.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import AutoTokenizer, AutoModelForCausalLM

MODEL_NAME = "bickett/meme-llama"

# Loaded once at import time so every request reuses the same weights.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)


def generate_captions(prompt: str, max_length: int = 50, num_return_sequences: int = 3) -> list:
    """
    Generate AI meme captions given a prompt using Meme-LLaMA.

    Args:
        prompt: Text to riff on.
        max_length: Maximum total token length (prompt tokens included).
        num_return_sequences: Number of caption variants to sample.

    Returns:
        A list of decoded caption strings, one per sampled sequence.

    Raises:
        Whatever ``model.generate`` / tokenization raises; callers (the
        /captions route) translate exceptions into HTTP 500 responses.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    # Sampling (do_sample + top_p + temperature) gives varied, non-greedy
    # captions rather than num_return_sequences identical outputs.
    outputs = model.generate(
        **inputs,
        max_length=max_length,
        num_return_sequences=num_return_sequences,
        do_sample=True,
        top_p=0.95,
        temperature=0.8,
    )

    return [tokenizer.decode(out, skip_special_tokens=True) for out in outputs]
app/services/image_service.py ADDED
File without changes
app/utils/__init__.py ADDED
File without changes
app/utils/file_utils.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import shutil

from fastapi import UploadFile

UPLOAD_DIR = "uploads"
os.makedirs(UPLOAD_DIR, exist_ok=True)


def save_upload_file(file: UploadFile) -> str:
    """
    Write an uploaded file into UPLOAD_DIR and return the saved path.

    The client-supplied filename is reduced to its base name so a crafted
    name such as "../../etc/passwd" cannot escape the upload directory.
    A missing filename falls back to "upload".
    """
    # basename() strips directory components from the untrusted filename.
    safe_name = os.path.basename(file.filename or "upload")
    file_path = os.path.join(UPLOAD_DIR, safe_name)
    with open(file_path, "wb") as buffer:
        shutil.copyfileobj(file.file, buffer)
    return file_path
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
fastapi
uvicorn
transformers
torch
python-multipart