import torch
from transformers import pipeline

gen = pipeline(
    "text-generation",
    model="iko-01/ARABIC_poetry2",
    tokenizer="iko-01/ARABIC_poetry2",
    device=0 if torch.cuda.is_available() else -1,
)

# Put your test examples here (single words or short phrases); you can edit them or add more
prompts = [
    "ูŠุง ู„ูŽุทูŠููŽ",
    "ู‚ูŽุฏู’ ุณู…ุนู’ุชู",
    "ุญูŠู†ูŽ ุงุดุชุฏู‘ูŽ",
    "ูŠุง ุฐุงูƒุฑุฉูŽ",
    "ู‚ูŽู„ุจูŠ",
    "ููŠ ุงู„ู„ูŠู„ู",
]

results = []
for p in prompts:
    out = gen(p,
              max_new_tokens=150,
              do_sample=True,
              top_k=50,
              top_p=0.95,
              temperature=0.8,
              num_return_sequences=1)
    text = out[0]["generated_text"]
    # ู†ุฑูŠุฏ ูู‚ุท ุงู„ุฌุฒุก ุงู„ุฌุฏูŠุฏ ุจุนุฏ ุงู„ู€ prompt (ุฃูˆ ูŠู…ูƒู† ุฅุฎุฑุงุฌ ุงู„ุณุทุฑ ูƒู„ู‡)
    # ู‡ู†ุง ู†ุญุชูุธ ุจุงู„ู†ุต ุงู„ูƒุงู…ู„ ู„ู…ุฑูˆู†ุฉ ุงู„ุนุฑุถ
    results.append({"prompt": p, "output": text})

# Quick display
for r in results:
    print("PROMPT:", r["prompt"])
    print("OUTPUT:", r["output"])
    print("-"*40)
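If only the continuation after each prompt is needed (as noted in the comment inside the loop), a minimal sketch that strips the echoed prompt from the output; alternatively, passing return_full_text=False to the text-generation pipeline should return only the new tokens:

# Optional: keep only the newly generated continuation.
# Assumes the pipeline echoes the prompt at the start of "generated_text" (the default behaviour).
for r in results:
    text = r["output"]
    if text.startswith(r["prompt"]):
        continuation = text[len(r["prompt"]):].lstrip()
    else:
        continuation = text
    print("PROMPT:      ", r["prompt"])
    print("CONTINUATION:", continuation)
    print("-" * 40)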