Alleinzellgaenger committed on
Commit 808de23 · 1 Parent(s): b779421

DOCKERFILEEEEEEEEEE

Files changed (2)
  1. Dockerfile +11 -20
  2. backend/app.py +24 -1
Dockerfile CHANGED
@@ -1,28 +1,19 @@
 # Use an official Python runtime as a base image
-FROM python:3.9-slim
-
-# Prevent Python from writing .pyc files and enable unbuffered logging
-ENV PYTHONDONTWRITEBYTECODE=1
-ENV PYTHONUNBUFFERED=1
-
-# Set HF_HOME to a writable cache directory
-ENV HF_HOME=/app/.cache/huggingface
-
-# Create the cache directory and ensure it is writable
-RUN mkdir -p /app/.cache/huggingface && chmod -R 777 /app/.cache/huggingface
-
-# Set the working directory inside the container
-WORKDIR /app
-
-# Copy and install backend dependencies
-COPY backend/requirements.txt /app/backend/requirements.txt
-RUN pip install --upgrade pip && pip install -r /app/backend/requirements.txt
-
-# Copy the entire project into the container
-COPY . /app
-
-# Expose port 8000 (the port our app will run on)
-EXPOSE 7680
-
-# Run the FastAPI app using Uvicorn
-CMD ["uvicorn", "backend.app:app", "--host", "0.0.0.0", "--port", "7680"]
+FROM python:3.9
+
+WORKDIR /code
+
+COPY backend/requirements.txt /code/backend/requirements.txt
+
+RUN pip install --no-cache-dir --upgrade -r /code/backend/requirements.txt
+
+RUN useradd -m -u 1000 user
+USER user
+ENV HOME=/home/user
+ENV PATH=/home/user/.local/bin:$PATH
+
+WORKDIR $HOME/app
+
+COPY --chown=user . $HOME/app
+
+CMD ["uvicorn", "backend.app:app", "--host", "0.0.0.0", "--port", "7860"]
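
Once this image is rebuilt, a quick way to check that Uvicorn is actually serving on the new port is to probe it from the host. Below is a minimal sketch using only the Python standard library; the localhost:7860 address assumes the container was started with that port published (e.g. a -p 7860:7860 mapping), and the script itself is a hypothetical helper, not part of this commit.

# smoke_test.py - hypothetical helper, not part of this commit.
# Assumes the container is running with port 7860 published to the host.
import urllib.error
import urllib.request

BASE_URL = "http://localhost:7860"  # assumed host/port mapping


def is_serving(url: str = BASE_URL, timeout: float = 5.0) -> bool:
    """Return True if the app answers on the expected port."""
    try:
        with urllib.request.urlopen(url, timeout=timeout) as resp:
            return resp.status < 500
    except urllib.error.HTTPError as exc:
        # Any HTTP response (even a 404) still means the server is up.
        return exc.code < 500
    except (urllib.error.URLError, OSError):
        return False


if __name__ == "__main__":
    print("server reachable:", is_serving())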
 
 
 
 
backend/app.py CHANGED
@@ -1,7 +1,7 @@
 from fastapi import FastAPI, HTTPException
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.staticfiles import StaticFiles
-from transformers import GPT2Tokenizer, GPT2Model
+from transformers import GPT2Tokenizer, GPT2Model, pipeline
 import torch as t
 import logging
 
@@ -62,3 +62,26 @@ async def process_text(text: str):
     except Exception as e:
         logger.error(f"Error processing text: {e}")
         raise HTTPException(status_code=500, detail=str(e))
+
+# Initialize the text2text-generation pipeline.
+# This pipeline will be able to generate text
+# given an input.
+pipe = pipeline("text2text-generation",
+                model="google/flan-t5-small")
+
+# Define a function to handle GET requests at `/generate`.
+# The generate() function is defined as a FastAPI route that takes a string
+# parameter called text. It generates text from the input using the pipeline
+# object and returns a JSON response with the generated text under "output".
+@app.get("/generate")
+def generate(text: str):
+    """
+    Using the text2text-generation pipeline from `transformers`, generate text
+    from the given input text. The model used is `google/flan-t5-small`, which
+    can be found [here](https://huggingface.co/google/flan-t5-small).
+    """
+    # Use the pipeline to generate text from the given input text
+    output = pipe(text)
+
+    # Return the generated text in a JSON response
+    return {"output": output[0]["generated_text"]}
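
With the new route in place, the endpoint can be exercised with a plain GET request once the container is running. Below is a minimal client sketch in Python; the base URL assumes the host/port from the Dockerfile's CMD (7860) is reachable from where the script runs, and both the script and the sample prompt are illustrative, not part of this commit.

# generate_client.py - hypothetical client for the new /generate route.
import json
import urllib.parse
import urllib.request

BASE_URL = "http://localhost:7860"  # assumed to match the container's CMD


def generate(text: str) -> str:
    """Call GET /generate?text=... and return the generated text."""
    query = urllib.parse.urlencode({"text": text})
    with urllib.request.urlopen(f"{BASE_URL}/generate?{query}") as resp:
        payload = json.load(resp)
    # The route returns {"output": "<generated text>"}
    return payload["output"]


if __name__ == "__main__":
    print(generate("Translate to German: Hello, how are you?"))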