JonusNattapong commited on
Commit
3a089e5
·
verified ·
1 Parent(s): 7f29093

Upload trained model

Browse files
MODEL_CARD.md ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # GPT4All-Model (Hanuman)
2
+
3
+ This model card is a placeholder. Before release, fill in: training data, license, evaluation results, intended use, and limitations.
4
+
5
+ - Name: GPT4All-Model
6
+ - Architecture: Hanuman (custom)
7
+ - Vocab size: see `config.json`
8
+ - License: not yet specified — add a LICENSE file to the repo root before distribution
9
+
README.md ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # GPT4All-Model (Hanuman)
2
+
3
+ This folder contains the files needed to load and run the custom Hanuman model.
4
+
5
+ Included files:
6
+
7
+ - `pytorch_model.bin` — model weights
8
+ - `config.json` — model configuration
9
+ - `tokenizer.json`, `tokenizer_config.json`, `special_tokens_map.json` — tokenizer files
10
+ - `modeling.py` — custom `Hanuman` model implementation
11
+ - `hanuman_loader.py` — convenience loader (optional)
12
+
13
+ Quick usage (local files in this folder):
14
+
15
+ ```python
16
+ # inference_local.py
17
+ from transformers import AutoTokenizer
18
+ from modeling import Hanuman
19
+ import torch
20
+
21
+ # load tokenizer from local folder
22
+ tokenizer = AutoTokenizer.from_pretrained('.')
23
+ # load model using the provided helper
24
+ model = Hanuman.from_pretrained('.', map_location='cpu')
25
+
26
+ prompt = "สวัสดีครับ ช่วยอธิบายสั้น ๆ เกี่ยวกับประเทศไทย"
27
+ inputs = tokenizer(prompt, return_tensors='pt')
28
+ outputs = model.generate(inputs['input_ids'], max_new_tokens=50, temperature=1.2, top_k=50, top_p=0.95)
29
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
30
+ ```
31
+
32
+ Or load from the Hugging Face Hub (if this folder was uploaded to the hub as the repo root):
33
+
34
+ ```python
35
+ # inference_from_hub.py
36
+ from transformers import AutoTokenizer
37
+ from hanuman_loader import HanumanModel
38
+
39
+ repo_id = "ZombitX64/GPT4All-Model"
40
+ # tokenizer will download from HF
41
+ tokenizer = AutoTokenizer.from_pretrained(repo_id)
42
+ # HanumanModel downloads weights and modeling.py dynamically
43
+ model_wrapper = HanumanModel.from_pretrained(repo_id, map_location='cpu')
44
+ model = model_wrapper.model
45
+
46
+ prompt = "สวัสดีครับ ช่วยสรุปประเทศไทยสั้น ๆ"
47
+ inputs = tokenizer(prompt, return_tensors='pt')
48
+ outputs = model.generate(inputs['input_ids'], max_new_tokens=50)
49
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
50
+ ```
51
+
52
+ Notes:
53
+ - This repo uses a custom model class (`Hanuman`) — users must keep `modeling.py` or use the provided `hanuman_loader.py` that dynamically imports it.
54
+ - For CPU inference, install a CPU build of PyTorch. For GPU, install the appropriate CUDA-enabled PyTorch.
55
+
inference_from_hub.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from transformers import AutoTokenizer
from hanuman_loader import HanumanModel


def run(prompt: str = "สวัสดีครับ ช่วยสรุปประเทศไทยสั้น ๆ") -> str:
    """Generate a short completion for *prompt* with the hub-hosted Hanuman model.

    Downloads the tokenizer and model from the Hugging Face Hub
    (``ZombitX64/GPT4All-Model``), generates up to 50 new tokens on CPU,
    prints the decoded text, and returns it.

    Args:
        prompt: Input text to condition generation on.

    Returns:
        The decoded generation (prompt included), special tokens stripped.
    """
    # Local import so merely importing this module does not require torch;
    # torch is only needed once inference actually runs.
    import torch

    repo_id = "ZombitX64/GPT4All-Model"
    tokenizer = AutoTokenizer.from_pretrained(repo_id)
    # HanumanModel downloads the weights and modeling.py dynamically.
    model_wrapper = HanumanModel.from_pretrained(repo_id, map_location='cpu')
    model = model_wrapper.model
    inputs = tokenizer(prompt, return_tensors='pt')
    # Inference only — disable autograd to avoid tracking gradients
    # (saves memory and time during generation).
    with torch.no_grad():
        out = model.generate(
            inputs['input_ids'],
            max_new_tokens=50,
            temperature=1.2,
            top_k=50,
            top_p=0.95,
        )
    text = tokenizer.decode(out[0], skip_special_tokens=True)
    print(text)
    # Return the text so callers can use the result programmatically
    # (previously it was only printed and discarded).
    return text


if __name__ == '__main__':
    run()
inference_local.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from transformers import AutoTokenizer
from modeling import Hanuman
import torch


def run(prompt: str = "สวัสดี") -> str:
    """Generate a short completion for *prompt* with the local Hanuman model.

    Loads the tokenizer and model weights from the current directory,
    generates up to 50 new tokens on CPU, prints the decoded text, and
    returns it.

    Args:
        prompt: Input text to condition generation on.

    Returns:
        The decoded generation (prompt included), special tokens stripped.
    """
    tokenizer = AutoTokenizer.from_pretrained('.')
    model = Hanuman.from_pretrained('.', map_location='cpu')
    inputs = tokenizer(prompt, return_tensors='pt')
    # Inference only — disable autograd to avoid tracking gradients
    # (saves memory and time during generation).
    with torch.no_grad():
        out = model.generate(
            inputs['input_ids'],
            max_new_tokens=50,
            temperature=1.2,
            top_k=50,
            top_p=0.95,
        )
    text = tokenizer.decode(out[0], skip_special_tokens=True)
    print(text)
    # Return the text so callers can use the result programmatically
    # (previously it was only printed and discarded).
    return text


if __name__ == '__main__':
    run("สวัสดีครับ ช่วยอธิบายประเทศไทยสั้น ๆ")
pytorch_model copy.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b56adef99dca3dbd97fe7df9deebe94df1525e7fa290dc7a211c0d37abbeb430
3
+ size 241866511
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ torch
2
+ transformers
3
+ huggingface_hub
4
+ safetensors