Spaces:
Sleeping
Sleeping
Commit
·
5189bbe
1
Parent(s):
5c28d53
added writable cache working directory
Browse files
- Dockerfile +2 -0
- load_meta_data.py +1 -0
Dockerfile
CHANGED
|
@@ -2,6 +2,8 @@ FROM python:3.11-slim
|
|
| 2 |
|
| 3 |
WORKDIR /app
|
| 4 |
|
|
|
|
|
|
|
| 5 |
COPY requirements.txt /app/requirements.txt
|
| 6 |
|
| 7 |
RUN pip install --no-cache-dir --upgrade -r /app/requirements.txt
|
|
|
|
| 2 |
|
| 3 |
WORKDIR /app
|
| 4 |
|
| 5 |
+
ENV TRANSFORMERS_CACHE /app/.cache/huggingface/hub
|
| 6 |
+
|
| 7 |
COPY requirements.txt /app/requirements.txt
|
| 8 |
|
| 9 |
RUN pip install --no-cache-dir --upgrade -r /app/requirements.txt
|
load_meta_data.py
CHANGED
|
@@ -25,6 +25,7 @@ class ChatBot:
|
|
| 25 |
inputs = self.tokenizer(text,return_tensors='pt')
|
| 26 |
outputs = self.model.generate(**inputs,
|
| 27 |
max_new_tokens = 100,
|
|
|
|
| 28 |
)
|
| 29 |
response = self.tokenizer.decode(outputs[0],skip_special_tokens=True)
|
| 30 |
response = self.get_clean_response(response)
|
|
|
|
| 25 |
inputs = self.tokenizer(text,return_tensors='pt')
|
| 26 |
outputs = self.model.generate(**inputs,
|
| 27 |
max_new_tokens = 100,
|
| 28 |
+
# add extra parameters if the model runs successfully
|
| 29 |
)
|
| 30 |
response = self.tokenizer.decode(outputs[0],skip_special_tokens=True)
|
| 31 |
response = self.get_clean_response(response)
|