Egor committed on
Commit
b2c222f
·
1 Parent(s): 863e656
Files changed (3) hide show
  1. Dockerfile +8 -13
  2. app.py +26 -6
  3. requirements.txt +6 -2
Dockerfile CHANGED
@@ -1,17 +1,12 @@
1
- # read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
2
- # you will also find guides on how best to write your Dockerfile
3
-
4
  FROM python:3.9
5
-
 
 
6
  RUN useradd -m -u 1000 user
7
-
8
- WORKDIR /app
9
-
10
- COPY --chown=user ./requirements.txt requirements.txt
11
-
12
- RUN pip install --no-cache-dir --upgrade -r requirements.txt
13
-
14
- COPY --chown=user . /app
15
-
16
  CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
17
 
 
 
 
 
 
1
  FROM python:3.9
2
+ WORKDIR /src
3
+ COPY ./requirements.txt /src/requirements.txt
4
+ RUN pip install --no-cache-dir --upgrade -r /src/requirements.txt
5
  RUN useradd -m -u 1000 user
6
+ USER user
7
+ ENV HOME=/home/user PATH=/home/user/.local/bin:$PATH
8
+ WORKDIR $HOME/app
9
+ COPY --chown=user . $HOME/app
 
 
 
 
 
10
  CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
11
 
12
+
app.py CHANGED
@@ -1,8 +1,28 @@
1
-
2
  from fastapi import FastAPI
3
-
 
 
4
  app = FastAPI()
5
-
6
- @app.get("/")
7
- def greet_json():
8
- return {"Hello": "World!"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  from fastapi import FastAPI
2
+ from transformers import pipeline
3
+
4
# FastAPI application instance — served by uvicorn as "app:app".
app = FastAPI()

# Load the text-to-text generation model once at import time so it is
# reused across all requests instead of being reloaded per call.
pipe = pipeline(task="text2text-generation", model="google/flan-t5-small")
12
+
13
+ # Define a function to handle the GET request at `/generate`
14
+ # The generate() function is defined as a FastAPI route that takes a
15
+ # string parameter called text. The function generates text based on the
+ # input using the pipeline() object, and returns a JSON response
16
+ # containing the generated text under the key "output"
17
# GET /generate — run the query-string `text` through the model and
# wrap the result in a JSON body.
@app.get("/generate")
def generate(text: str):
    """
    Generate text from the given input using the `transformers`
    text2text-generation pipeline. The underlying model is
    `google/flan-t5-small`
    ([model card](<https://huggingface.co/google/flan-t5-small>)).
    """
    # The pipeline returns a list of dicts, each carrying the key
    # "generated_text"; only the first candidate is used.
    result = pipe(text)
    generated = result[0]["generated_text"]

    # JSON response with the generated text under the key "output".
    return {"output": generated}
requirements.txt CHANGED
@@ -1,2 +1,6 @@
1
- fastapi
2
- uvicorn[standard]
 
 
 
 
 
1
+ fastapi==0.74.*
2
+ requests==2.27.*
3
+ uvicorn[standard]==0.17.*
4
+ sentencepiece==0.1.*
5
+ torch==1.11.*
6
+ transformers==4.*