Upload nvidia_Llama-3_3-Nemotron-Super-49B-v1_5_0.py with huggingface_hub
nvidia_Llama-3_3-Nemotron-Super-49B-v1_5_0.py
CHANGED
@@ -1,14 +1,8 @@
 # /// script
 # requires-python = ">=3.12"
 # dependencies = [
-#     "numpy",
-#     "einops",
-#     "torch",
 #     "transformers",
-#     "
-#     "datasets",
-#     "accelerate",
-#     "timm",
+#     "torch",
 # ]
 # ///
@@ -21,10 +15,6 @@ try:
         {"role": "user", "content": "Who are you?"},
     ]
     pipe(messages)
-
-    # Load model directly
-    from transformers import AutoModelForCausalLM
-    model = AutoModelForCausalLM.from_pretrained("nvidia/Llama-3_3-Nemotron-Super-49B-v1_5", trust_remote_code=True, torch_dtype="auto"),
     with open('nvidia_Llama-3_3-Nemotron-Super-49B-v1_5_0.txt', 'w') as f:
         f.write('Everything was good in nvidia_Llama-3_3-Nemotron-Super-49B-v1_5_0.txt')
 except Exception as e:
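For context, the visible hunks imply the updated script is a `transformers` pipeline smoke test wrapped in a `try`/`except` block, with the dependency list trimmed to `transformers` and `torch` and the redundant `AutoModelForCausalLM` load removed. The following is a minimal sketch of what the full file presumably looks like after this change; the `pipeline(...)` construction and the `except` body are not shown in the diff and are assumptions here:

# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "transformers",
#     "torch",
# ]
# ///

from transformers import pipeline

try:
    # Assumed: the diff never shows how `pipe` is built; this mirrors the
    # trust_remote_code/torch_dtype arguments of the removed from_pretrained call.
    pipe = pipeline(
        "text-generation",
        model="nvidia/Llama-3_3-Nemotron-Super-49B-v1_5",
        trust_remote_code=True,
        torch_dtype="auto",
    )
    messages = [
        {"role": "user", "content": "Who are you?"},
    ]
    pipe(messages)
    # Marker file signalling the smoke test completed without raising.
    with open('nvidia_Llama-3_3-Nemotron-Super-49B-v1_5_0.txt', 'w') as f:
        f.write('Everything was good in nvidia_Llama-3_3-Nemotron-Super-49B-v1_5_0.txt')
except Exception as e:
    # Assumed: the except body is not visible in the diff; printing the error
    # is a placeholder consistent with a pass/fail smoke test.
    print(f"Error: {e}")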