EngReem85 committed on
Commit
d3f5fa7
·
verified ยท
1 Parent(s): 12964eb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -11
app.py CHANGED
@@ -3,15 +3,15 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
 
4
  model_name = "MahmoudIbrahim/Meta-LLama3-Instruct-Arabic"
5
 
 
6
  tokenizer = AutoTokenizer.from_pretrained(model_name)
7
  model = AutoModelForCausalLM.from_pretrained(
8
  model_name,
9
- device_map="cpu",
10
- trust_remote_code=True
 
11
  )
12
 
13
-
14
-
15
  generator = pipeline(
16
  "text-generation",
17
  model=model,
@@ -20,21 +20,18 @@ generator = pipeline(
20
 
21
  def inspire(future, value, skill):
22
  if not future or not value or not skill:
23
- return "⚠️ رجاءً أكملي جميع الحقول."
24
-
25
  prompt = (
26
  f"اكتب جملة عربية قصيرة وملهمة تصف مستقبل شخص يريد {future}، "
27
  f"ويؤمن بـ {value}، ويمتلك مهارة {skill}. "
28
- "لتكن الجملة واضحة، إنسانية، ومليئة بالأمل."
29
  )
30
-
31
  try:
32
  result = generator(
33
  prompt,
34
  max_new_tokens=60,
35
  temperature=0.8,
36
- top_p=0.9,
37
- repetition_penalty=1.2
38
  )
39
  text = result[0]["generated_text"].strip()
40
  if text.startswith(prompt):
@@ -52,7 +49,7 @@ demo = gr.Interface(
52
  ],
53
  outputs=gr.Textbox(label="🌟 رؤيتك المستقبلية", lines=3),
54
  title="ذكاء يصمم مستقبلك بالعربية",
55
- description="استخدمي هذا النموذج العربي المتقدّم ليولّد لك جملة ملهمة.",
56
  theme="soft"
57
  )
58
 
 
3
 
4
  model_name = "MahmoudIbrahim/Meta-LLama3-Instruct-Arabic"
5
 
6
+ # ✅ نحمل النموذج بدون 4bit وبدون quantization (يشتغل على CPU)
7
  tokenizer = AutoTokenizer.from_pretrained(model_name)
8
  model = AutoModelForCausalLM.from_pretrained(
9
  model_name,
10
+ device_map="cpu", # تشغيل على CPU
11
+ trust_remote_code=True, # للسماح بتحميل الكود الخاص بالنموذج
12
+ torch_dtype="float32" # نستخدم دقة عادية تناسب CPU
13
  )
14
 
 
 
15
  generator = pipeline(
16
  "text-generation",
17
  model=model,
 
20
 
21
  def inspire(future, value, skill):
22
  if not future or not value or not skill:
23
+ return "⚠️ رجاءً أجيبي على جميع الحقول."
 
24
  prompt = (
25
  f"اكتب جملة عربية قصيرة وملهمة تصف مستقبل شخص يريد {future}، "
26
  f"ويؤمن بـ {value}، ويمتلك مهارة {skill}. "
27
+ "لتكن الجملة إنسانية وشاعرية ومليئة بالأمل."
28
  )
 
29
  try:
30
  result = generator(
31
  prompt,
32
  max_new_tokens=60,
33
  temperature=0.8,
34
+ top_p=0.9
 
35
  )
36
  text = result[0]["generated_text"].strip()
37
  if text.startswith(prompt):
 
49
  ],
50
  outputs=gr.Textbox(label="🌟 رؤيتك المستقبلية", lines=3),
51
  title="ذكاء يصمم مستقبلك بالعربية",
52
+ description="نموذج عربي متقدّم يكتب جملة ملهمة بناءً على قيمك وأحلامك.",
53
  theme="soft"
54
  )
55