subbu123456 committed on
Commit
c755cab
·
verified ·
1 Parent(s): f73b709

Upload 4 files

Browse files
Files changed (4) hide show
  1. Dockerfile +7 -0
  2. app.py +31 -0
  3. requirements.txt +5 -0
  4. roberta_model.pkl +3 -0
Dockerfile ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
# Minimal image for the RoBERTa FastAPI inference service.
FROM python:3.10-slim

WORKDIR /app

# Copy and install dependencies first so this layer is cached
# across code-only changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Bring in the application code (and the pickled model file).
COPY . .

# Hugging Face Spaces routes traffic to port 7860.
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""FastAPI service exposing a pickled RoBERTa sequence-classification model.

The model object is deserialized from ``roberta_model.pkl`` once at import
time and kept in memory for the lifetime of the process; the tokenizer is
fetched from the Hugging Face hub (cached after the first run).
"""
from fastapi import FastAPI
from pydantic import BaseModel
import torch
import os
from transformers import RobertaTokenizer


class InputData(BaseModel):
    # Raw text to classify via POST /predict.
    text: str


app = FastAPI()

# Load model.
# FIX: PyTorch >= 2.6 changed the torch.load default to weights_only=True,
# which refuses to unpickle a full model object like this one — pass
# weights_only=False explicitly to keep the original (pre-2.6) behavior.
# SECURITY NOTE(review): this deserializes arbitrary pickle data; it is only
# acceptable because the file ships inside the image — never point this at
# untrusted input.
model_path = os.path.join(os.path.dirname(__file__), "roberta_model.pkl")
model = torch.load(model_path, map_location=torch.device("cpu"), weights_only=False)
model.eval()  # inference mode: disables dropout / batch-norm updates

# Load tokenizer (matches the roberta-base checkpoint family).
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")


@app.get("/")
async def root():
    """Health-check endpoint so the Space shows as alive."""
    return {"message": "RoBERTa FastAPI Space is running!"}


@app.post("/predict")
async def predict(data: InputData):
    """Tokenize ``data.text``, run the model, and return the argmax class id."""
    inputs = tokenizer(data.text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():  # no autograd graph needed for inference
        outputs = model(**inputs)
    logits = outputs.logits
    # dim=1 selects over the class dimension of the (1, num_labels) logits.
    prediction = torch.argmax(logits, dim=1).item()
    return {"prediction": prediction}
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
fastapi
uvicorn
torch
transformers
pydantic
roberta_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:83acce8cff053ecb1dd5f36bf2fad7bc51ee4720e1d17b0c43afa1b029264ec3
3
+ size 11