lainlives committed on
Commit
914f89a
·
verified ·
1 Parent(s): 3820d48

Upload folder using huggingface_hub

Browse files
Files changed (2) hide show
  1. Dockerfile +32 -80
  2. README.md +2 -5
Dockerfile CHANGED
@@ -1,89 +1,41 @@
1
- FROM nvidia/cuda:12.8.0-cudnn-devel-ubuntu24.04
2
- ENV DEBIAN_FRONTEND=noninteractive
3
- RUN apt-get update && apt-get install -y
4
- RUN apt-get update && \
5
- apt-get upgrade -y && \
6
- apt-get install -y --no-install-recommends --fix-missing \
 
 
 
 
 
 
 
 
7
  git \
8
- git-lfs \
9
  wget \
10
- curl \
11
- cmake \
12
- build-essential \
13
- libssl-dev \
14
- zlib1g-dev \
15
- libbz2-dev \
16
- libreadline-dev \
17
- libsqlite3-dev \
18
- libncursesw5-dev \
19
- xz-utils \
20
- tk-dev \
21
- libxml2-dev \
22
- libxmlsec1-dev \
23
- libffi-dev \
24
- golang-go \
25
- python3 \
26
- liblzma-dev \
27
- ffmpeg \
28
- nvidia-driver-570 \
29
- python3 \
30
- python3-pip curl
31
-
32
-
33
- RUN id -u 1000 &>/dev/null || useradd -m -u 1000 user
34
- USER 1000
35
- ENV HOME=/home/user \
36
- PATH=/home/user/.local/bin:${PATH}
37
- WORKDIR ${HOME}/app
38
-
39
- RUN curl https://pyenv.run | bash
40
- ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH}
41
- ARG PYTHON_VERSION=3.13
42
- RUN pyenv install ${PYTHON_VERSION} && \
43
- pyenv global ${PYTHON_VERSION} && \
44
- pyenv rehash
45
- RUN pip install --no-cache-dir -U pip setuptools wheel
46
- RUN pip install "huggingface-hub" "hf-transfer" "gradio[oauth]>=6.5.1" "APScheduler" "protobuf>=4.21.0,<5.0.0" "sentencepiece>=0.1.98,<0.3.0" "numpy~=1.26.4" "gguf>=0.1.0" "fastapi"
47
-
48
- COPY --chown=1000 . ${HOME}/app
49
- RUN pip install "torch>=2.8.0"
50
- RUN pip install git+https://github.com/huggingface/transformers.git
51
-
52
-
53
 
 
 
 
 
 
54
 
 
55
 
56
- ENV PYTHONPATH=${HOME}/app \
57
- PYTHONUNBUFFERED=1 \
58
- HF_HUB_ENABLE_HF_TRANSFER=1 \
59
- GRADIO_ALLOW_FLAGGING=never \
60
- GRADIO_NUM_PORTS=1 \
61
- GRADIO_SERVER_NAME=0.0.0.0 \
62
- GRADIO_ANALYTICS_ENABLED=False \
63
- TQDM_POSITION=-1 \
64
- TQDM_MININTERVAL=1 \
65
- SYSTEM=spaces \
66
- LD_LIBRARY_PATH=/usr/local/cuda/lib64:${LD_LIBRARY_PATH} \
67
- PATH=/usr/local/nvidia/bin:${PATH}
68
 
69
- RUN cd ${HOME}/app && \
70
- git clone --recursive https://github.com/ollama/ollama.git && \
71
- cd ollama && \
72
- go generate ./... && \
73
- go build . && \
74
- ln -s $PWD/ollama /usr/bin/ollama && \
75
- chmod +x ollama && \
76
- cd ..
77
 
78
- RUN cd ${HOME}/app && \
79
- git clone --recursive https://github.com/ggerganov/llama.cpp && \
80
- cd llama.cpp && \
81
- cmake -B build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=OFF -DLLAMA_CURL=OFF && \
82
- cmake --build build --config Release -j --target llama-quantize llama-gguf-split llama-imatrix --parallel 42 && \
83
- cp ./build/bin/llama-* . && \
84
- rm -rf build && \
85
- cd ..
86
 
87
- EXPOSE 7860
88
 
89
- ENTRYPOINT /bin/bash start.sh
 
1
+ # Use Fedora 43 as base
2
+ FROM fedora:43
3
+
4
+ # ARG HF_TOKEN
5
+ #
6
+ # ENV HF_TOKEN=$HF_TOKEN
7
+
8
+ # Install dependencies for building RPMs
9
+ RUN dnf -y update && dnf -y install \
10
+ dnf-plugins-core \
11
+ rpm-build \
12
+ python3-devel \
13
+ gcc-c++ \
14
+ cmake \
15
  git \
16
+ golang \
17
  wget \
18
+ dnf-plugins-core \
19
+ make \
20
+ --setopt=install_weak_deps=False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
+ # Install all target Python versions
23
+ RUN dnf -y install \
24
+ python3.13-devel \
25
+ python3.13 \
26
+ --setopt=install_weak_deps=False
27
 
28
+ RUN dnf builddep python3-torch python3-torchvision python3-torchaudio -y
29
 
30
+ RUN wget https://developer.download.nvidia.com/compute/cuda/13.1.1/local_installers/cuda-repo-fedora42-13-1-local-13.1.1_590.48.01-1.x86_64.rpm && \
31
+ sudo rpm -i cuda-repo-fedora42-13-1-local-13.1.1_590.48.01-1.x86_64.rpm && \
32
+ sudo dnf clean all && \
33
+ sudo dnf -y install cuda-toolkit-13-1 nvidia-open && \
34
+ sudo dnf clean all
 
 
 
 
 
 
 
35
 
 
 
 
 
 
 
 
 
36
 
37
+ WORKDIR /app
 
 
 
 
 
 
 
38
 
39
+ COPY * .
40
 
41
+ CMD ["python" "start.py"]
README.md CHANGED
@@ -1,12 +1,9 @@
1
  ---
2
- title: HF to Ollama
3
  emoji: 📈
4
  colorFrom: gray
5
- colorTo: pink
6
  sdk: docker
7
  pinned: false
8
- suggested_hardware: "a10g-large"
9
  disable_embedding: true
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: bldr
3
  emoji: 📈
4
  colorFrom: gray
5
+ colorTo: gray
6
  sdk: docker
7
  pinned: false
 
8
  disable_embedding: true
9
  ---