Mitchell Kilpatrick SE2022 committed on
Commit
2b6b06d
·
1 Parent(s): f810166

App working

Browse files
Files changed (3) hide show
  1. Dockerfile +7 -10
  2. app.py +29 -3
  3. requirements.txt +6 -1
Dockerfile CHANGED
@@ -1,16 +1,13 @@
1
- # Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
2
- # you will also find guides on how best to write your Dockerfile
3
 
4
- FROM python:3.9
5
 
6
- RUN useradd -m -u 1000 user
7
- USER user
8
- ENV PATH="/home/user/.local/bin:$PATH"
9
 
10
- WORKDIR /app
 
 
11
 
12
- COPY --chown=user ./requirements.txt requirements.txt
13
- RUN pip install --no-cache-dir --upgrade -r requirements.txt
14
 
15
- COPY --chown=user . /app
16
  CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
 
# Minimal image for serving the FastAPI app with uvicorn.
FROM python:3.10

# All subsequent relative paths resolve against /app.
WORKDIR /app

# Copy the dependency manifest first so Docker's layer cache skips the
# (slow) pip install step when only application code changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Now bring in the application source itself.
COPY . .

# Hugging Face Spaces routes external traffic to port 7860.
EXPOSE 7860

CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py CHANGED
@@ -1,7 +1,33 @@
1
  from fastapi import FastAPI
 
 
 
 
 
2
 
3
  app = FastAPI()
4
 
5
- @app.get("/")
6
- def greet_json():
7
- return {"Hello": "World!"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from fastapi import FastAPI
from pydantic import BaseModel
import torch
from transformers import AutoTokenizer, T5ForConditionalGeneration

# Checkpoint used for text -> IPA generation.
MODEL_NAME = "google/byt5-small"

app = FastAPI()

print("Loading model...")

# Load tokenizer and weights once at module import so every request
# reuses the same in-memory model instead of reloading per call.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = T5ForConditionalGeneration.from_pretrained(MODEL_NAME)
model.eval()  # inference mode (disables dropout, etc.)

print("Model loaded.")


class TextRequest(BaseModel):
    # Input text to transcribe into IPA.
    text: str


@app.get("/")
def root() -> dict:
    """Lightweight root/health endpoint.

    This commit removed the previous GET "/" route; without one the
    Space's landing URL returns 404. Restoring it is backward-compatible
    and gives health probes something cheap to hit.
    """
    return {"status": "ok"}


@app.post("/predict")
def predict(request: TextRequest) -> dict:
    """Generate an IPA transcription for ``request.text``.

    Returns:
        ``{"ipa": <decoded model output>}``.
    """
    # NOTE(review): this prompt format assumes the checkpoint was
    # fine-tuned on "Text: ...\nIPA:" pairs — stock byt5-small is not;
    # confirm the deployed MODEL_NAME matches the training format.
    prompt = f"Text: {request.text}\nIPA:"

    # Truncate long inputs so generation cost stays bounded.
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)

    with torch.no_grad():  # no gradients needed for inference
        outputs = model.generate(**inputs, max_new_tokens=64)

    result = tokenizer.decode(outputs[0], skip_special_tokens=True)

    return {"ipa": result}
requirements.txt CHANGED
@@ -1,2 +1,7 @@
1
  fastapi
2
- uvicorn[standard]
 
 
 
 
 
 
1
  fastapi
2
+ uvicorn
3
+ torch
4
+ transformers
5
+ sentencepiece
6
+ safetensors
7
+ numpy<2