choco-conoz committed on
Commit
d28a2a4
·
1 Parent(s): 96bdea1

feat: light model test

Browse files
Files changed (2) hide show
  1. Dockerfile +3 -3
  2. src/streamlit_app.py +2 -2
Dockerfile CHANGED
@@ -10,9 +10,9 @@ RUN apt-get update && apt-get install -y \
10
  git \
11
  && rm -rf /var/lib/apt/lists/*
12
 
13
- ENV PYTHONUNBUFFERED=1 \
14
- PORT=8000 \
15
- HF_HOME=/home/user/huggingface
16
 
17
  COPY src/ ./src/
18
  # RUN pip3 install poetry==2.1.3
 
10
  git \
11
  && rm -rf /var/lib/apt/lists/*
12
 
13
+ # ENV PYTHONUNBUFFERED=1 \
14
+ # PORT=8000 \
15
+ # HF_HOME=/home/user/huggingface
16
 
17
  COPY src/ ./src/
18
  # RUN pip3 install poetry==2.1.3
src/streamlit_app.py CHANGED
@@ -5,12 +5,12 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
5
  # from huggingface_hub import notebook_login
6
  # from unsloth import FastLanguageModel, is_bfloat16_supported
7
 
8
- # model_id = "sentence-transformers/all-MiniLM-L6-v2"
9
  # model_id = "sentence-transformers/xlm-r-base-en-ko-nli-ststb"
10
 
11
  # model_id = "mistralai/Mistral-7B-Instruct-v0.1"
12
  # model_id = "meta-llama/Llama-3.2-1B"
13
- model_id = "choco-conoz/TwinLlama-3.1-8B"
14
 
15
  # processor = pipeline(
16
  # "text-generation",
 
5
  # from huggingface_hub import notebook_login
6
  # from unsloth import FastLanguageModel, is_bfloat16_supported
7
 
8
+ model_id = "sentence-transformers/all-MiniLM-L6-v2"
9
  # model_id = "sentence-transformers/xlm-r-base-en-ko-nli-ststb"
10
 
11
  # model_id = "mistralai/Mistral-7B-Instruct-v0.1"
12
  # model_id = "meta-llama/Llama-3.2-1B"
13
+ # model_id = "choco-conoz/TwinLlama-3.1-8B"
14
 
15
  # processor = pipeline(
16
  # "text-generation",