belkacemm committed on
Commit
31816db
·
1 Parent(s): 54ea71b

added app.py

Browse files
Files changed (2) hide show
  1. app.py +46 -3
  2. requirements.txt +70 -0
app.py CHANGED
@@ -1,7 +1,50 @@
 
1
  import gradio as gr
 
2
 
3
- def greet(name):
4
- return "Hello " + name + "!!"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
7
  demo.launch()
 
1
# Third-party dependencies: torch for inference, gradio for the chat UI,
# transformers for loading the causal-LM checkpoint.
import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# 🔁 CHANGE THIS — Hugging Face Hub repo id of the model to serve.
MODEL_ID = "Velkamez/kabyle-LLM"

# Load tokenizer & model once at startup (downloads from the Hub on first run).
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float32,  # CPU-safe
)
# Inference only: switch off dropout/batch-norm training behavior.
model.eval()
15
+
16
def chat(message, history):
    """Generate one assistant reply from the Kabyle LLM.

    Args:
        message: The latest user message (str).
        history: Prior turns. Gradio may pass either a list of
            (user, assistant) tuples (legacy ``type="tuples"``) or a list
            of ``{"role": ..., "content": ...}`` dicts (``type="messages"``,
            the default in Gradio >= 5). Both formats are accepted here;
            the original tuple-only unpacking silently mangled
            messages-format history into "User: role / Assistant: content".

    Returns:
        The model's reply as a string.
    """
    # Flatten the conversation into a plain-text "User:/Assistant:" prompt.
    prompt = ""
    for turn in history:
        if isinstance(turn, dict):
            # messages format: one dict per utterance.
            role = "User" if turn.get("role") == "user" else "Assistant"
            prompt += f"{role}: {turn.get('content', '')}\n"
        else:
            # legacy tuples format: (user_message, assistant_message) pairs.
            user, assistant = turn
            prompt += f"User: {user}\nAssistant: {assistant}\n"
    prompt += f"User: {message}\nAssistant:"

    inputs = tokenizer(prompt, return_tensors="pt")

    # Sampling-based decoding; no_grad avoids building autograd state.
    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            max_new_tokens=150,
            temperature=0.8,
            top_k=50,
            do_sample=True,
        )

    decoded = tokenizer.decode(output_ids[0], skip_special_tokens=True)

    # The decoded text echoes the full prompt; keep only the text after the
    # final "Assistant:" marker — the newly generated completion.
    reply = decoded.split("Assistant:")[-1].strip()
    return reply
39
+
40
# Sample prompts shown beneath the chat box (Kabyle greetings/tasks).
_EXAMPLE_PROMPTS = [
    "Azul amek i tellid?",
    "Aru awal ɣef Taqbaylit",
]

# Wire the generation function into a ready-made Gradio chat UI.
demo = gr.ChatInterface(
    fn=chat,
    title="Kabyle / Tamazight LLM Demo",
    description="Chat with a Kabyle / Tamazight language model.",
    examples=_EXAMPLE_PROMPTS,
)

# Start the web server (blocks until shutdown).
demo.launch()
requirements.txt ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ aiofiles==24.1.0
2
+ annotated-doc==0.0.4
3
+ annotated-types==0.7.0
4
+ anyio==4.12.1
5
+ audioop-lts==0.2.2
6
+ brotli==1.2.0
7
+ certifi==2026.1.4
8
+ click==8.3.1
9
+ colorama==0.4.6
10
+ contourpy==1.3.3
11
+ cycler==0.12.1
12
+ fastapi==0.128.0
13
+ ffmpy==1.0.0
14
+ filelock==3.20.3
15
+ fonttools==4.61.1
16
+ fsspec==2026.1.0
17
+ gradio==6.5.1
18
+ gradio_client==2.0.3
19
+ groovy==0.1.2
20
+ h11==0.16.0
21
+ hf-xet==1.2.0
22
+ httpcore==1.0.9
23
+ httpx==0.28.1
24
+ huggingface_hub==1.3.7
25
+ idna==3.11
26
+ Jinja2==3.1.6
27
+ kiwisolver==1.4.9
28
+ markdown-it-py==4.0.0
29
+ MarkupSafe==3.0.3
30
+ matplotlib==3.10.8
31
+ mdurl==0.1.2
32
+ mpmath==1.3.0
33
+ networkx==3.6.1
34
+ numpy==2.4.1
35
+ orjson==3.11.7
36
+ packaging==26.0
37
+ pandas==3.0.0
38
+ pillow==12.1.0
39
+ protobuf==4.25.8
40
+ pydantic==2.12.5
41
+ pydantic_core==2.41.5
42
+ pydub==0.25.1
43
+ Pygments==2.19.2
44
+ pyparsing==3.3.2
45
+ python-dateutil==2.9.0.post0
46
+ python-multipart==0.0.22
47
+ pytz==2025.2
48
+ PyYAML==6.0.3
49
+ regex==2026.1.15
50
+ rich==14.3.2
51
+ safehttpx==0.1.7
52
+ safetensors==0.7.0
53
+ semantic-version==2.10.0
54
+ sentencepiece==0.2.1
55
+ setuptools==80.10.2
56
+ shellingham==1.5.4
57
+ six==1.17.0
58
+ starlette==0.50.0
59
+ sympy==1.14.0
60
+ tokenizers==0.22.2
61
+ tomlkit==0.13.3
62
+ torch==2.10.0
63
+ tqdm==4.67.3
64
+ transformers==5.1.0
65
+ typer==0.21.1
66
+ typer-slim==0.21.1
67
+ typing-inspection==0.4.2
68
+ typing_extensions==4.15.0
69
+ tzdata==2025.3
70
+ uvicorn==0.40.0