aloobun commited on
Commit
a444a73
·
verified ·
1 Parent(s): 1b24d29

Upload folder using huggingface_hub

Browse files
README.md ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model:
3
+ - tinycompany/shawty-CoT-Hindi-English
4
+ - tinycompany/Shawty-1.4B-SFT-Stage-1
5
+ library_name: transformers
6
+ tags:
7
+ - mergekit
8
+ - merge
9
+
10
+ ---
11
+ # MergedShawty-v0.4
12
+
13
+ This is a merge of pre-trained language models created using [mergekit](https://github.com/arcee-ai/mergekit).
14
+
15
+ ## Merge Details
16
+ ### Merge Method
17
+
18
+ This model was merged using the [SLERP](https://en.wikipedia.org/wiki/Slerp) merge method.
19
+
20
+ ### Models Merged
21
+
22
+ The following models were included in the merge:
23
+ * [tinycompany/shawty-CoT-Hindi-English](https://huggingface.co/tinycompany/shawty-CoT-Hindi-English)
24
+ * [tinycompany/Shawty-1.4B-SFT-Stage-1](https://huggingface.co/tinycompany/Shawty-1.4B-SFT-Stage-1)
25
+
26
+ ### Configuration
27
+
28
+ The following YAML configuration was used to produce this model:
29
+
30
+ ```yaml
31
+ base_model: tinycompany/Shawty-1.4B-SFT-Stage-1
32
+ merge_method: slerp
33
+ tokenizer_source: base
34
+ dtype: bfloat16
35
+ parameters:
36
+ t:
37
+ - filter: self_attn
38
+ value: [0.2, 0.6, 0.4, 0.8, 1.0]
39
+ - filter: mlp
40
+ value: [1.0, 0.6, 0.8, 0.4, 0.2]
41
+ - value: 0.3
42
+ slices:
43
+ - sources:
44
+ - model: tinycompany/Shawty-1.4B-SFT-Stage-1
45
+ layer_range: [0, 28]
46
+ - model: tinycompany/shawty-CoT-Hindi-English
47
+ layer_range: [0, 28]
48
+
49
+ ```
config.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "tinycompany/Shawty-1.4B-SFT-Stage-1",
3
+ "architectures": [
4
+ "Qwen2ForCausalLM"
5
+ ],
6
+ "attention_dropout": 0.0,
7
+ "bos_token_id": 0,
8
+ "eos_token_id": 0,
9
+ "hidden_act": "silu",
10
+ "hidden_size": 1536,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 8960,
13
+ "max_position_embeddings": 131072,
14
+ "max_window_layers": 28,
15
+ "model_type": "qwen2",
16
+ "num_attention_heads": 12,
17
+ "num_hidden_layers": 28,
18
+ "num_key_value_heads": 2,
19
+ "pad_token_id": 0,
20
+ "rms_norm_eps": 1e-06,
21
+ "rope_scaling": null,
22
+ "rope_theta": 1000000.0,
23
+ "sliding_window": null,
24
+ "tie_word_embeddings": true,
25
+ "torch_dtype": "bfloat16",
26
+ "transformers_version": "4.47.0",
27
+ "use_cache": false,
28
+ "use_mrope": false,
29
+ "use_sliding_window": false,
30
+ "vocab_size": 81920
31
+ }
mergekit_config.yml ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ base_model: tinycompany/Shawty-1.4B-SFT-Stage-1
2
+ merge_method: slerp
3
+ tokenizer_source: base
4
+ dtype: bfloat16
5
+ parameters:
6
+ t:
7
+ - filter: self_attn
8
+ value: [0.2, 0.6, 0.4, 0.8, 1.0]
9
+ - filter: mlp
10
+ value: [1.0, 0.6, 0.8, 0.4, 0.2]
11
+ - value: 0.3
12
+ slices:
13
+ - sources:
14
+ - model: tinycompany/Shawty-1.4B-SFT-Stage-1
15
+ layer_range: [0, 28]
16
+ - model: tinycompany/shawty-CoT-Hindi-English
17
+ layer_range: [0, 28]
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f21c6ddf840b3a62e7aead5acf2c6126d231ef8c9ddb31403ce47f6e82b2b669
3
+ size 2872377928
special_tokens_map.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "eos_token": {
3
+ "content": "<|endoftext|>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "pad_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ }
16
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<|endoftext|>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "4709": {
12
+ "content": "user",
13
+ "lstrip": false,
14
+ "normalized": true,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": false
18
+ },
19
+ "27673": {
20
+ "content": "system",
21
+ "lstrip": false,
22
+ "normalized": true,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": false
26
+ },
27
+ "81912": {
28
+ "content": "<think>",
29
+ "lstrip": false,
30
+ "normalized": true,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": false
34
+ },
35
+ "81913": {
36
+ "content": "</think>",
37
+ "lstrip": false,
38
+ "normalized": true,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": false
42
+ },
43
+ "81914": {
44
+ "content": "<|im_end|>",
45
+ "lstrip": false,
46
+ "normalized": true,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": false
50
+ },
51
+ "81915": {
52
+ "content": "<|im_start|>",
53
+ "lstrip": false,
54
+ "normalized": true,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": false
58
+ },
59
+ "81916": {
60
+ "content": "assistant",
61
+ "lstrip": false,
62
+ "normalized": true,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": false
66
+ },
67
+ "81917": {
68
+ "content": "BiBo",
69
+ "lstrip": false,
70
+ "normalized": true,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": false
74
+ },
75
+ "81918": {
76
+ "content": "aloobun",
77
+ "lstrip": false,
78
+ "normalized": true,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": false
82
+ },
83
+ "81919": {
84
+ "content": "LowIQGenAI",
85
+ "lstrip": false,
86
+ "normalized": true,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": false
90
+ }
91
+ },
92
+ "chat_template": "\n{% set system_message = 'You are BiBo, a helpful and friendly AI assistant developed by aloobun and LowIQGenAI.' %}\n{%- if messages and messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + (messages[0]['content'] if 'content' in messages[0] else messages[0]['value'] if 'value' in messages[0] else '') + '<|im_end|>\\n' }}\n{%- else %}\n {{- '<|im_start|>system\\n' + system_message + '<|im_end|>\\n' }}\n{%- endif %}\n{%- for message in messages %}\n {%- set content = message['content'] if 'content' in message else message['value'] if 'value' in message else '' %}\n {%- if message['role'] in ['user', 'human'] %}\n {{- '<|im_start|>user\\n' + content + '<|im_end|>\\n' }}\n {%- elif message['role'] in ['assistant', 'gpt'] %}\n {{- '<|im_start|>assistant\\n' + content + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
93
+ "clean_up_tokenization_spaces": false,
94
+ "eos_token": "<|endoftext|>",
95
+ "extra_special_tokens": {},
96
+ "max_length": 2048,
97
+ "model_max_length": 8192,
98
+ "pad_to_multiple_of": null,
99
+ "pad_token": "<|endoftext|>",
100
+ "pad_token_type_id": 0,
101
+ "padding_side": "right",
102
+ "stride": 0,
103
+ "tokenizer_class": "PreTrainedTokenizerFast",
104
+ "truncation_side": "right",
105
+ "truncation_strategy": "longest_first"
106
+ }