Update app.py
app.py CHANGED
@@ -4,14 +4,13 @@ from huggingface_hub import InferenceClient
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 import os
-from dotenv import load_dotenv
 
-
-
+hf_token = os.getenv("HF_TOKEN")
+login(token=hf_token)
 
 model_id = "meta-llama/Llama-3.2-1B"  # small enough to run locally on CPU
-tokenizer = AutoTokenizer.from_pretrained(model_id)
-model = AutoModelForCausalLM.from_pretrained(model_id)
+tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
+model = AutoModelForCausalLM.from_pretrained(model_id, token=hf_token)
 
 
 def chat(prompt):
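For context, a minimal sketch of how the top of app.py reads after this change. It assumes `login` is imported from `huggingface_hub` (only `InferenceClient` appears in the hunk header) and that the Space defines an `HF_TOKEN` secret; the body of `chat()` is not part of this diff and is shown only as a placeholder.

import os

import torch
from huggingface_hub import InferenceClient, login
from transformers import AutoTokenizer, AutoModelForCausalLM

# Read the Hugging Face access token from the environment (e.g. a Space secret)
# and authenticate so the gated Llama repo can be downloaded.
hf_token = os.getenv("HF_TOKEN")
login(token=hf_token)

model_id = "meta-llama/Llama-3.2-1B"  # small enough to run locally on CPU

# Pass the token explicitly so the gated model files can be fetched.
tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(model_id, token=hf_token)


def chat(prompt):
    # Body unchanged by this commit and not shown in the diff.
    ...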