Update download_model.py
download_model.py  CHANGED  +3 -5
@@ -1,10 +1,11 @@
 import torch
-import huggingface_hub
+from huggingface_hub import login
 from transformers import (AutoTokenizer,
                           BitsAndBytesConfig,
                           MBart50TokenizerFast,
                           AutoModelForSeq2SeqLM,
                           MBartForConditionalGeneration)
+from config import hf_token
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
@@ -26,10 +27,7 @@ def download_model(model_name: str):
         tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="en_XX")
         return model, tokenizer
     elif model_name == "Llama-3.2-1B-Instruct":
-
-        str2 = "AFSBqvApwHjMQuTOALqZKRpRBzEUL"
-        token = "h"+str1+str2
-        huggingface_hub.login(token = token)
+        login(token = hf_token)
         model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B-Instruct").to(device)
         tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct")
         return model, tokenizer
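
The change replaces a token pieced together from hardcoded string fragments with an hf_token imported from a config module that is not part of this diff. A minimal sketch of what that module might contain, assuming the token is supplied through an environment variable (for example a Space secret named HF_TOKEN; both the module body and the variable name are assumptions, since the diff only shows the import):

# config.py (hypothetical) -- keep the Hugging Face token out of the
# repository by reading it from the environment at import time.
import os

hf_token = os.environ.get("HF_TOKEN")
if hf_token is None:
    raise RuntimeError("Set the HF_TOKEN environment variable (e.g. as a Space secret).")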
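
For completeness, a hedged usage sketch of the updated function. It assumes AutoModelForCausalLM is also imported from transformers somewhere in download_model.py (the import block visible in this hunk does not include it, so the Llama branch would otherwise raise a NameError) and that the token grants access to the gated meta-llama repository:

# Hypothetical caller for the function defined in download_model.py.
from download_model import download_model

model, tokenizer = download_model("Llama-3.2-1B-Instruct")
inputs = tokenizer("Hello!", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))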