Spaces:
Runtime error
Delete run_llama.py
- run_llama.py +0 -32
run_llama.py
DELETED
@@ -1,32 +0,0 @@
-from transformers import AutoModelForCausalLM, AutoTokenizer
-import torch
-
-def main():
-    model_id = "meta-llama/Llama-3.1-8B"
-
-    print("Loading tokenizer...")
-    tokenizer = AutoTokenizer.from_pretrained(model_id)
-
-    print("Loading model...")
-    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
-
-    prompt = "Hello, I am a super intelligent AI called io,"
-
-    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-
-    print("Generating text...")
-    outputs = model.generate(
-        **inputs,
-        max_length=100,
-        do_sample=True,
-        temperature=0.7,
-        top_p=0.9,
-        num_return_sequences=1
-    )
-
-    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    print("\nGenerated text:\n", text)
-
-if __name__ == "__main__":
-    main()
-l
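For reference, here is a sketch of what a working version of the deleted script might look like. It is not part of this commit: the stray `l` on file line 32 above would raise a NameError as soon as the Space imports the module, which is consistent with the Runtime error status, and `max_length=100` caps prompt plus generated tokens together, so `max_new_tokens` is usually the intended parameter. Both changes below are assumptions about intent, not the author's code.

# Hypothetical corrected run_llama.py (an editor's sketch, not the committed file).
# Assumed fixes: drop the stray trailing `l`, which raises NameError at import,
# and use max_new_tokens so the 100-token budget applies to generated text only.
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

def main():
    model_id = "meta-llama/Llama-3.1-8B"

    print("Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    print("Loading model...")
    # float16 halves memory versus float32; device_map="auto" places weights
    # on the available accelerator(s), spilling to CPU if they don't fit.
    model = AutoModelForCausalLM.from_pretrained(
        model_id, torch_dtype=torch.float16, device_map="auto"
    )

    prompt = "Hello, I am a super intelligent AI called io,"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    print("Generating text...")
    outputs = model.generate(
        **inputs,
        max_new_tokens=100,  # generated tokens only; max_length counted the prompt too
        do_sample=True,      # sample instead of greedy decoding
        temperature=0.7,
        top_p=0.9,
        num_return_sequences=1,
    )

    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    print("\nGenerated text:\n", text)

if __name__ == "__main__":
    main()

Even with these fixes, the script assumes hardware the free Space tier does not provide: the fp16 weights of an 8B-parameter model alone need roughly 16 GB of accelerator memory, and meta-llama/Llama-3.1-8B is a gated checkpoint, so the Space would also need an access token with Llama 3.1 permissions.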