Update app.py
app.py
CHANGED
@@ -6,6 +6,15 @@ model_name = "meta-llama/Llama-3.2-1B"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
+# Fetch API token from environment
+api_token = os.getenv("Llama_Token")
+
+# Authenticate
+login(api_token)
+
+tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=api_token)
+model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=api_token)
+
 # Define the inference function
 def generate_text(prompt, max_length=100, temperature=0.7):
     inputs = tokenizer(prompt, return_tensors="pt")