Spaces:
Sleeping
Sleeping
Ved Gupta
committed on
Commit
·
be43a34
1
Parent(s):
b8cad3b
Add dependencies and download model file
Browse files — Dockerfile +2 -1
Dockerfile
CHANGED
|
@@ -1,11 +1,12 @@
|
|
| 1 |
FROM python:3.9-alpine
|
| 2 |
|
| 3 |
RUN apk add --no-cache build-base cmake git wget gcc g++ make
|
|
|
|
|
|
|
| 4 |
|
| 5 |
RUN mkdir models
|
| 6 |
RUN wget -q "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_0.gguf" -O models/mistral-7b-instruct-v0.2.Q4_0.gguf
|
| 7 |
|
| 8 |
-
RUN pip install llama-cpp-python sse_starlette starlette_context pydantic_settings fastapi uvicorn
|
| 9 |
|
| 10 |
EXPOSE 8080
|
| 11 |
CMD ["python", "-m", "llama_cpp.server", "--model", "models/mistral-7b-instruct-v0.2.Q4_0.gguf"]
|
|
|
|
| 1 |
FROM python:3.9-alpine
|
| 2 |
|
| 3 |
RUN apk add --no-cache build-base cmake git wget gcc g++ make
|
| 4 |
+
RUN pip install llama-cpp-python sse_starlette starlette_context pydantic_settings fastapi uvicorn
|
| 5 |
+
|
| 6 |
|
| 7 |
RUN mkdir models
|
| 8 |
RUN wget -q "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_0.gguf" -O models/mistral-7b-instruct-v0.2.Q4_0.gguf
|
| 9 |
|
|
|
|
| 10 |
|
| 11 |
EXPOSE 8080
|
| 12 |
CMD ["python", "-m", "llama_cpp.server", "--model", "models/mistral-7b-instruct-v0.2.Q4_0.gguf"]
|