add model 7b
Browse files- Dockerfile +2 -2
- Dockerfile-13b +1 -1
- Dockerfile-7b +2 -2
Dockerfile
CHANGED
|
@@ -17,8 +17,8 @@ RUN pip install -r requriments.txt \
|
|
| 17 |
&& git lfs install \
|
| 18 |
&& git clone https://huggingface.co/lmsys/${vicuna_diff} \
|
| 19 |
&& git clone https://huggingface.co/decapoda-research/${llama_version} \
|
| 20 |
- && pip install git+https://github.com/lm-sys/FastChat.git@v0.2.
|
| 21 |
- && python -m fastchat.model.apply_delta --base ${dir}/${llama_version}/ --target ${dir}/vicuna_out --delta ${dir}/${vicuna_diff}/ \
|
| 22 |
&& sed -i -e '16c\ \ llama_model: "/usr/local/src/MiniGPT-4/vicuna_out"' ${dir}/minigpt4/configs/models/minigpt4.yaml
|
| 23 |
|
| 24 |
RUN adduser --disabled-password --gecos '' user
|
|
|
|
| 17 |
&& git lfs install \
|
| 18 |
&& git clone https://huggingface.co/lmsys/${vicuna_diff} \
|
| 19 |
&& git clone https://huggingface.co/decapoda-research/${llama_version} \
|
| 20 |
+ && pip install git+https://github.com/lm-sys/FastChat.git@v0.2.1 \
|
| 21 |
+ && python -m fastchat.model.apply_delta --base ${dir}/${llama_version}/ --target ${dir}/vicuna_out --delta ${dir}/${vicuna_diff}/ --low-cpu-mem \
|
| 22 |
&& sed -i -e '16c\ \ llama_model: "/usr/local/src/MiniGPT-4/vicuna_out"' ${dir}/minigpt4/configs/models/minigpt4.yaml
|
| 23 |
|
| 24 |
RUN adduser --disabled-password --gecos '' user
|
Dockerfile-13b
CHANGED
|
@@ -18,7 +18,7 @@ RUN pip install -r requriments-13b.txt \
|
|
| 18 |
&& git clone https://huggingface.co/lmsys/${vicuna_diff} \
|
| 19 |
&& git clone https://huggingface.co/decapoda-research/${llama_version} \
|
| 20 |
&& pip install git+https://github.com/lm-sys/FastChat.git@v0.1.10 \
|
| 21 |
- && python -m fastchat.model.apply_delta --base ${dir}/${llama_version}/ --target ${dir}/vicuna_out --delta ${dir}/${vicuna_diff}/ \
|
| 22 |
&& sed -i -e '16c\ \ llama_model: "/usr/local/src/MiniGPT-4/vicuna_out"' ${dir}/minigpt4/configs/models/minigpt4.yaml
|
| 23 |
|
| 24 |
RUN adduser --disabled-password --gecos '' user
|
|
|
|
| 18 |
&& git clone https://huggingface.co/lmsys/${vicuna_diff} \
|
| 19 |
&& git clone https://huggingface.co/decapoda-research/${llama_version} \
|
| 20 |
&& pip install git+https://github.com/lm-sys/FastChat.git@v0.1.10 \
|
| 21 |
+ && python -m fastchat.model.apply_delta --base ${dir}/${llama_version}/ --target ${dir}/vicuna_out --delta ${dir}/${vicuna_diff}/ --low-cpu-mem \
|
| 22 |
&& sed -i -e '16c\ \ llama_model: "/usr/local/src/MiniGPT-4/vicuna_out"' ${dir}/minigpt4/configs/models/minigpt4.yaml
|
| 23 |
|
| 24 |
RUN adduser --disabled-password --gecos '' user
|
Dockerfile-7b
CHANGED
|
@@ -17,8 +17,8 @@ RUN pip install -r requriments.txt \
|
|
| 17 |
&& git lfs install \
|
| 18 |
&& git clone https://huggingface.co/lmsys/${vicuna_diff} \
|
| 19 |
&& git clone https://huggingface.co/decapoda-research/${llama_version} \
|
| 20 |
- && pip install git+https://github.com/lm-sys/FastChat.git@v0.2.
|
| 21 |
- && python -m fastchat.model.apply_delta --base ${dir}/${llama_version}/ --target ${dir}/vicuna_out --delta ${dir}/${vicuna_diff}/ \
|
| 22 |
&& sed -i -e '16c\ \ llama_model: "/usr/local/src/MiniGPT-4/vicuna_out"' ${dir}/minigpt4/configs/models/minigpt4.yaml
|
| 23 |
|
| 24 |
RUN adduser --disabled-password --gecos '' user
|
|
|
|
| 17 |
&& git lfs install \
|
| 18 |
&& git clone https://huggingface.co/lmsys/${vicuna_diff} \
|
| 19 |
&& git clone https://huggingface.co/decapoda-research/${llama_version} \
|
| 20 |
+ && pip install git+https://github.com/lm-sys/FastChat.git@v0.2.1 \
|
| 21 |
+ && python -m fastchat.model.apply_delta --base ${dir}/${llama_version}/ --target ${dir}/vicuna_out --delta ${dir}/${vicuna_diff}/ --low-cpu-mem \
|
| 22 |
&& sed -i -e '16c\ \ llama_model: "/usr/local/src/MiniGPT-4/vicuna_out"' ${dir}/minigpt4/configs/models/minigpt4.yaml
|
| 23 |
|
| 24 |
RUN adduser --disabled-password --gecos '' user
|