rxmha125 committed
Commit d95d4a2 · verified · 1 parent: b16a084

Upload 3 files

Files changed (3)
  1. Dockerfile +20 -0
  2. app.py +122 -0
  3. requirements.txt +11 -0
Dockerfile ADDED
@@ -0,0 +1,20 @@
+ # Use an official Python base image
+ FROM python:3.11-slim
+
+ # Set the working directory inside the container
+ WORKDIR /code
+
+ # Copy the requirements file and install dependencies.
+ # requirements.txt already pulls the CPU-only torch wheel via its
+ # --extra-index-url line, which keeps the image small.
+ COPY ./requirements.txt /code/requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ # Copy the rest of the application code
+ COPY ./app.py /code/app.py
+
+ # Expose the port the app runs on (FastAPI default is 8000)
+ EXPOSE 8000
+
+ # Command to run the application using uvicorn
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]
app.py ADDED
@@ -0,0 +1,122 @@
+ # app.py
+
+ import logging
+ from contextlib import asynccontextmanager
+ import torch
+ from fastapi import FastAPI, HTTPException
+ from pydantic import BaseModel
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ # --- Configuration ---
+ # Your Hugging Face model repository
+ HF_REPO_ID = "rxmha125/RxCodexV1-mini"
+ # Use the GPU if available (CUDA), otherwise fall back to the CPU
+ MODEL_LOAD_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # --- Logging Setup ---
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ # --- Global variables to hold the model and tokenizer ---
+ # These will be loaded during the application's startup.
+ model = None
+ tokenizer = None
+
+ # --- Application Lifespan (Model Loading) ---
+ # This function runs code when the API starts up and shuts down.
+ @asynccontextmanager
+ async def lifespan(app: FastAPI):
+     global model, tokenizer
+     logger.info(f"API Startup: Loading model '{HF_REPO_ID}' to device '{MODEL_LOAD_DEVICE}'...")
+
+     # Load the tokenizer from Hugging Face
+     try:
+         tokenizer = AutoTokenizer.from_pretrained(HF_REPO_ID)
+         # Set a padding token if it's not already set
+         if tokenizer.pad_token is None and tokenizer.eos_token:
+             tokenizer.pad_token = tokenizer.eos_token
+         logger.info("✅ Tokenizer loaded successfully.")
+     except Exception as e:
+         logger.error(f"❌ FATAL: Tokenizer loading failed: {e}")
+
+     # Load the model from Hugging Face
+     try:
+         model = AutoModelForCausalLM.from_pretrained(HF_REPO_ID)
+         model.to(MODEL_LOAD_DEVICE)  # Move the model to the correct device (CPU or GPU)
+         model.eval()  # Set the model to evaluation mode (important for inference)
+         logger.info("✅ Model loaded successfully.")
+     except Exception as e:
+         logger.error(f"❌ FATAL: Model loading failed: {e}")
+
+     yield  # The API is now running
+
+     # --- Code below this line runs on shutdown ---
+     logger.info("API shutting down.")
+     model = None
+     tokenizer = None
+
+
+ # --- Initialize FastAPI ---
+ # The 'lifespan' function is linked here so the model loads on startup.
+ app = FastAPI(
+     title="Rx Codex V1-mini Simple API",
+     description="A simplified API for text generation without authentication.",
+     lifespan=lifespan,
+ )
+
+ # --- Pydantic Models for API Data Validation ---
+ # Defines the structure of the incoming request body
+ class GenerationRequest(BaseModel):
+     prompt: str
+     max_new_tokens: int = 50
+
+ # Defines the structure of the outgoing response
+ class GenerationResponse(BaseModel):
+     generated_text: str
+
+ # --- API Endpoints ---
+ @app.get("/")
+ def root():
+     """A simple endpoint to check if the API is running."""
+     status = "loaded" if model is not None and tokenizer is not None else "not loaded"
+     return {"message": "Rx Codex API is running", "model_status": status}
+
+ @app.post("/generate", response_model=GenerationResponse)
+ async def generate_text(request: GenerationRequest):
+     """The main endpoint to generate text from a prompt."""
+     if model is None or tokenizer is None:
+         raise HTTPException(status_code=503, detail="Model is not ready. Please try again later.")
+
+     logger.info(f"Received generation request for prompt: '{request.prompt}'")
+
+     # Prepare the input text for the model
+     inputs = tokenizer(request.prompt, return_tensors="pt", padding=True, truncation=True)
+     input_ids = inputs["input_ids"].to(MODEL_LOAD_DEVICE)
+     attention_mask = inputs["attention_mask"].to(MODEL_LOAD_DEVICE)
+
+     # Generate text using the model
+     with torch.no_grad():
+         output_sequences = model.generate(
+             input_ids=input_ids,
+             attention_mask=attention_mask,  # explicit mask avoids ambiguity when pad == eos
+             max_new_tokens=request.max_new_tokens,
+             pad_token_id=tokenizer.pad_token_id,
+         )
+
+     # Decode the generated tokens back into text
+     generated_text = tokenizer.decode(output_sequences[0], skip_special_tokens=True)
+
+     # Simple cleanup to remove the original prompt from the output
+     if generated_text.lower().startswith(request.prompt.lower()):
+         generated_text = generated_text[len(request.prompt):].strip()
+
+     logger.info("Generation complete.")
+     return GenerationResponse(generated_text=generated_text)
+
+
+ # --- Uvicorn Runner ---
+ # This allows you to run the app directly with 'python app.py'
+ if __name__ == "__main__":
+     import uvicorn
+     logger.info("Starting API via Uvicorn...")
+     uvicorn.run(app, host="0.0.0.0", port=8000)
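Once the container is running, the /generate endpoint can be exercised with a minimal client. The sketch below is illustrative and not part of the commit; it assumes the server is reachable at http://localhost:8000 and uses only the Python standard library, with the prompt and max_new_tokens fields matching GenerationRequest above.

# client_example.py (illustrative, not part of this commit)
# Sends a prompt to the /generate endpoint and prints the completion.
import json
import urllib.request

payload = {"prompt": "Hello, world", "max_new_tokens": 50}
request = urllib.request.Request(
    "http://localhost:8000/generate",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(request) as response:
    result = json.load(response)

print(result["generated_text"])

FastAPI also serves interactive documentation for the same endpoint at http://localhost:8000/docs once the server is up.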
requirements.txt ADDED
@@ -0,0 +1,11 @@
+ # requirements.txt
+
+ fastapi
+ uvicorn[standard]
+ transformers
+ sentencepiece
+
+ # Pull the CPU-only build of PyTorch for a smaller install and image.
+ # If you have an NVIDIA GPU, remove the extra index and install the CUDA build instead.
+ --extra-index-url https://download.pytorch.org/whl/cpu
+ torch
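After installing these requirements, a quick sanity check confirms the CPU-only wheel was actually selected (worth verifying, since with an extra index the resolver can still pick the default PyPI build). A minimal check, illustrative and not part of this commit:

# verify_torch.py (illustrative, not part of this commit)
import torch

print(torch.__version__)          # CPU-only wheels typically report a version like "2.x.x+cpu"
print(torch.cuda.is_available())  # expected to be False with the CPU-only build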