Update model.py
model.py CHANGED
@@ -1,5 +1,9 @@
 import torch
 from transformers import pipeline
+import os
+
+hf_token = os.getenv("LLM_token")
+os.environ["HUGGINGFACE_HUB_TOKEN"] = hf_token
 
 def load_model(model_path="meta-llama/Meta-Llama-3-8B-Instruct"):
     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
@@ -9,5 +13,6 @@ def load_model(model_path="meta-llama/Meta-Llama-3-8B-Instruct"):
         model=model_path,
         model_kwargs={"torch_dtype": torch.float16} if torch.cuda.is_available() else {},
         device=device,
+        token=hf_token
     )
     return pipe
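The new token wiring assumes the Space exposes a secret named LLM_token; if that variable is unset, os.getenv returns None and the os.environ assignment raises a TypeError as soon as model.py is imported. Below is a minimal usage sketch under that assumption; the explicit guard, the caller script, and the prompt are illustrative additions rather than part of this commit, and it assumes load_model returns a text-generation pipeline.

# Hypothetical caller (e.g. app.py on the Space), not part of this commit.
# Check the secret before importing model.py, because model.py writes it into
# os.environ at import time and a missing value would raise TypeError there.
import os

if os.getenv("LLM_token") is None:
    raise RuntimeError("Set the LLM_token secret on the Space before loading the model.")

from model import load_model  # model.py as updated in this commit

pipe = load_model()  # downloads meta-llama/Meta-Llama-3-8B-Instruct using the token
print(pipe("Write a haiku about GPUs.", max_new_tokens=48))  # assumes a text-generation pipeline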