Update app.py
app.py CHANGED
@@ -1,9 +1,16 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
+import os
+
+token = os.getenv("HF_TOKEN")  # Safely fetch the token from environment
+# tokenizer = AutoTokenizer.from_pretrained(
+#     "meta-llama/Meta-Llama-3-70B-Instruct",
+#     token=token  # Use the token when loading the model
+# )
 
 # Load model and tokenizer
-tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-70B-Instruct")
+tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-70B-Instruct", token=token)
 model = AutoModelForCausalLM.from_pretrained(
     "meta-llama/Meta-Llama-3-70B-Instruct",
     torch_dtype=torch.float16,
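
The hunk ends mid-call, after torch_dtype=torch.float16, so the rest of the file is not shown. A minimal sketch of how the updated app.py might continue, assuming a plain text-in/text-out Gradio demo; the generate helper, device_map="auto", passing token=token to the model load, and the generation settings are assumptions, not part of this commit:

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3-70B-Instruct",
    torch_dtype=torch.float16,
    device_map="auto",   # assumption: shard the 70B weights across whatever GPUs are available
    token=token,         # assumption: the gated weights need the same token as the tokenizer
)

def generate(prompt):
    # Hypothetical helper: tokenize the prompt, generate, and decode only the newly produced tokens.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.7)
    return tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)

demo = gr.Interface(fn=generate, inputs="text", outputs="text",
                    title="Meta-Llama-3-70B-Instruct")
demo.launch()

The visible hunk only adds token=token to the tokenizer call; for a gated repo the model load needs the token as well, which is why the sketch includes it. The HF_TOKEN value must be added as a secret in the Space settings so the runtime exposes it as an environment variable. Note also that the instruct checkpoint normally expects its chat template (tokenizer.apply_chat_template), and a 70B model in float16 needs on the order of 140 GB of GPU memory, so a smaller checkpoint is often a more practical first test on Space hardware.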