animaslabs committed
Commit 91d2308 · verified · Parent: 85072b8

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,23 @@
+---
+language:
+- zh
+- en
+library_name: transformers
+license: mit
+pipeline_tag: image-text-to-text
+tags:
+- mlx
+---
+
+# animaslabs/GLM-4.6V-Flash-4bit
+This model was converted to MLX format from [`zai-org/GLM-4.6V-Flash`](https://huggingface.co/zai-org/GLM-4.6V-Flash) using mlx-vlm version **0.3.12**.
+Refer to the [original model card](https://huggingface.co/zai-org/GLM-4.6V-Flash) for more details on the model.
+## Use with mlx
+
+```bash
+pip install -U mlx-vlm
+```
+
+```bash
+python -m mlx_vlm.generate --model animaslabs/GLM-4.6V-Flash-4bit --max-tokens 100 --temperature 0.0 --prompt "Describe this image." --image <path_to_image>
+```
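For readers who prefer the Python API over the CLI, a minimal sketch follows. It assumes the mlx-vlm ~0.3.x helpers `load`, `load_config`, and `apply_chat_template`; exact signatures have shifted between releases, so check against the installed version.

```python
# Minimal sketch of the Python-API equivalent of the CLI call above.
# Assumes mlx-vlm ~0.3.x; helper names/signatures may differ per release.
from mlx_vlm import load, generate
from mlx_vlm.prompt_utils import apply_chat_template
from mlx_vlm.utils import load_config

model_path = "animaslabs/GLM-4.6V-Flash-4bit"
model, processor = load(model_path)
config = load_config(model_path)

images = ["path/to/image.png"]  # placeholder path
prompt = apply_chat_template(processor, config, "Describe this image.", num_images=len(images))

output = generate(model, processor, prompt, images, max_tokens=100, temperature=0.0, verbose=False)
print(output)
```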
chat_template.jinja ADDED
@@ -0,0 +1,140 @@
+[gMASK]<sop>
+{%- if tools -%}
+<|system|>
+# Tools
+
+You may call one or more functions to assist with the user query.
+
+You are provided with function signatures within <tools></tools> XML tags:
+<tools>
+{% for tool in tools %}
+{{ tool | tojson(ensure_ascii=False) }}
+{% endfor %}
+</tools>
+
+For each function call, output the function name and arguments within the following XML format:
+<tool_call>{function-name}
+<arg_key>{arg-key-1}</arg_key>
+<arg_value>{arg-value-1}</arg_value>
+<arg_key>{arg-key-2}</arg_key>
+<arg_value>{arg-value-2}</arg_value>
+...
+</tool_call>{%- endif -%}
+{%- macro visible_text(content) -%}
+{%- if content is string -%}
+{{- content }}
+{%- elif content is iterable and content is not mapping -%}
+{%- for item in content -%}
+{%- if item is mapping and item.type == 'text' -%}
+{{- item.text }}
+{%- elif item is mapping and (item.type == 'image' or 'image' in item) -%}
+<|begin_of_image|><|image|><|end_of_image|>
+{%- elif item is mapping and (item.type == 'video' or 'video' in item) -%}
+<|begin_of_video|><|video|><|end_of_video|>
+{%- elif item is string -%}
+{{- item }}
+{%- endif -%}
+{%- endfor -%}
+{%- else -%}
+{{- content }}
+{%- endif -%}
+{%- endmacro -%}
+{%- set ns = namespace(last_user_index=-1) %}
+{%- for m in messages %}
+{%- if m.role == 'user' %}
+{% set ns.last_user_index = loop.index0 -%}
+{%- endif %}
+{%- endfor %}
+{% for m in messages %}
+{%- if m.role == 'user' -%}<|user|>
+{% if m.content is string %}
+{{ m.content }}
+{%- else %}
+{%- for item in m.content %}
+{% if item.type == 'video' or 'video' in item %}
+<|begin_of_video|><|video|><|end_of_video|>{% elif item.type == 'image' or 'image' in item %}
+<|begin_of_image|><|image|><|end_of_image|>{% elif item.type == 'text' %}
+{{ item.text }}
+{%- endif %}
+{%- endfor %}
+{%- endif %}
+{{- '/nothink' if (enable_thinking is defined and not enable_thinking and not visible_text(m.content).endswith("/nothink")) else '' -}}
+{%- elif m.role == 'assistant' -%}
+<|assistant|>
+{%- set reasoning_content = '' %}
+{%- set content = visible_text(m.content) %}
+{%- if m.reasoning_content is string %}
+{%- set reasoning_content = m.reasoning_content %}
+{%- else %}
+{%- if '</think>' in content %}
+{%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+{%- set content = content.split('</think>')[-1].lstrip('\n') %}
+{%- endif %}
+{%- endif %}
+{%- if loop.index0 > ns.last_user_index and reasoning_content -%}
+{{ '\n<think>' + reasoning_content.strip() + '</think>'}}
+{%- else -%}
+{{ '\n<think></think>' }}
+{%- endif -%}
+{%- if content.strip() -%}
+{{ '\n' + content.strip() }}
+{%- endif -%}
+{% if m.tool_calls %}
+{% for tc in m.tool_calls %}
+{%- if tc.function %}
+{%- set tc = tc.function %}
+{%- endif %}
+{{ '\n<tool_call>' + tc.name }}
+{% set _args = tc.arguments %}
+{% for k, v in _args.items() %}
+<arg_key>{{ k }}</arg_key>
+<arg_value>{{ v | tojson(ensure_ascii=False) if v is not string else v }}</arg_value>
+{% endfor %}
+</tool_call>{% endfor %}
+{% endif %}
+{%- elif m.role == 'tool' -%}
+{%- if m.content is string -%}
+{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+{{- '<|observation|>' }}
+{%- endif %}
+{{- '\n<tool_response>\n' }}
+{{- m.content }}
+{{- '\n</tool_response>' }}
+{% elif m.content is iterable and m.content is not mapping %}
+{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+{{- '<|observation|>' }}
+{%- endif %}
+{{- '\n<tool_response>\n' }}
+{%- for tr in m.content -%}
+{%- if tr is mapping and tr.type is defined -%}
+{%- set t = tr.type | lower -%}
+{%- if t == 'text' and tr.text is defined -%}
+{{ tr.text }}
+{%- elif t in ['image', 'image_url'] -%}
+<|begin_of_image|><|image|><|end_of_image|>
+{%- elif t in ['video', 'video_url'] -%}
+<|begin_of_video|><|video|><|end_of_video|>
+{%- else -%}
+{{ tr | tojson(ensure_ascii=False) }}
+{%- endif -%}
+{%- else -%}
+{{ tr.output if tr.output is defined else tr }}
+{%- endif -%}
+{%- endfor -%}
+{{- '\n</tool_response>' }}
+{%- else -%}
+<|observation|>{% for tr in m.content %}
+
+<tool_response>
+{{ tr.output if tr.output is defined else tr }}
+</tool_response>{% endfor -%}
+{% endif -%}
+{%- elif m.role == 'system' -%}
+<|system|>
+{{ visible_text(m.content) }}
+{%- endif -%}
+{%- endfor -%}
+{%- if add_generation_prompt -%}
+<|assistant|>
+{{'<think></think>\n' if (enable_thinking is defined and not enable_thinking) else ''}}
+{%- endif -%}
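To see what this template produces, one can render it through the tokenizer. A sketch, assuming the repo id above and that `transformers` can load this tokenizer and template; the exact whitespace of the result depends on the template details:

```python
# Sketch: render the chat template above for a one-turn image prompt.
# Assumes AutoTokenizer can load this repo's tokenizer.json and template.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("animaslabs/GLM-4.6V-Flash-4bit")
messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe this image."},
    ]},
]
# Passing enable_thinking=False would append '/nothink' per the template logic.
text = tok.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
print(text)  # starts with "[gMASK]<sop>"; image wrapped in <|begin_of_image|>...<|end_of_image|>
```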
config.json ADDED
@@ -0,0 +1,82 @@
+{
+  "architectures": [
+    "Glm4vForConditionalGeneration"
+  ],
+  "eos_token_id": [
+    151329,
+    151336,
+    151338,
+    151348
+  ],
+  "image_end_token_id": 151340,
+  "image_start_token_id": 151339,
+  "image_token_id": 151363,
+  "model_type": "glm4v",
+  "quantization": {
+    "group_size": 64,
+    "bits": 4,
+    "mode": "affine"
+  },
+  "quantization_config": {
+    "group_size": 64,
+    "bits": 4,
+    "mode": "affine"
+  },
+  "text_config": {
+    "attention_bias": true,
+    "attention_dropout": 0.0,
+    "dtype": "bfloat16",
+    "eos_token_id": [
+      151329,
+      151336,
+      151338
+    ],
+    "hidden_act": "silu",
+    "hidden_size": 4096,
+    "initializer_range": 0.02,
+    "intermediate_size": 13696,
+    "max_position_embeddings": 131072,
+    "model_type": "glm4v_text",
+    "num_attention_heads": 32,
+    "num_hidden_layers": 40,
+    "num_key_value_heads": 2,
+    "pad_token_id": 151329,
+    "rms_norm_eps": 1e-05,
+    "rope_parameters": {
+      "mrope_section": [
+        8,
+        12,
+        12
+      ],
+      "partial_rotary_factor": 0.5,
+      "rope_theta": 500000,
+      "rope_type": "default"
+    },
+    "use_cache": true,
+    "vocab_size": 151552
+  },
+  "tie_word_embeddings": false,
+  "transformers_version": "5.0.0rc0",
+  "video_end_token_id": 151342,
+  "video_start_token_id": 151341,
+  "video_token_id": 151364,
+  "vision_config": {
+    "attention_bias": false,
+    "attention_dropout": 0.0,
+    "depth": 24,
+    "hidden_act": "silu",
+    "hidden_dropout_prob": 0.0,
+    "hidden_size": 1536,
+    "image_size": 336,
+    "in_channels": 3,
+    "initializer_range": 0.02,
+    "intermediate_size": 13696,
+    "model_type": "glm4v_vision",
+    "num_heads": 12,
+    "out_hidden_size": 4096,
+    "patch_size": 14,
+    "rms_norm_eps": 1e-05,
+    "spatial_merge_size": 2,
+    "temporal_patch_size": 2
+  }
+}
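As a rough sanity check on the 4-bit, group-size-64 affine quantization declared above: MLX-style affine quantization stores, per group, the packed 4-bit codes plus a scale and a bias (assumed fp16 each here), which puts the effective cost near 4.5 bits per weight.

```python
# Back-of-envelope bits-per-weight for 4-bit affine quantization with
# group_size 64; fp16 scale + fp16 bias per group is an assumption.
group_size, bits = 64, 4
overhead_bits = 16 + 16                          # scale + bias per group
effective = bits + overhead_bits / group_size
print(f"effective bits/weight ~= {effective}")   # 4.5
```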
generation_config.json ADDED
@@ -0,0 +1,15 @@
+{
+  "_from_model_config": true,
+  "do_sample": true,
+  "eos_token_id": [
+    151329,
+    151336,
+    151338,
+    151348
+  ],
+  "pad_token_id": 151329,
+  "top_p": 0.6,
+  "temperature": 0.8,
+  "top_k": 2,
+  "transformers_version": "4.57.1"
+}
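These defaults combine temperature scaling with aggressive truncation (top_k 2, then top_p 0.6). An illustrative sketch of that pipeline, not the library's actual sampler:

```python
# Illustrative temperature -> top-k -> top-p sampling for the defaults above
# (temperature 0.8, top_k 2, top_p 0.6); not transformers' actual code.
import numpy as np

def sample(logits, temperature=0.8, top_k=2, top_p=0.6, rng=np.random.default_rng()):
    logits = np.asarray(logits, dtype=np.float64) / temperature
    kth = np.sort(logits)[-top_k]                 # top-k: drop everything below the k-th largest
    logits = np.where(logits >= kth, logits, -np.inf)
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    order = np.argsort(-probs)                    # top-p: smallest prefix whose mass reaches p
    keep = order[: np.searchsorted(np.cumsum(probs[order]), top_p) + 1]
    trimmed = np.zeros_like(probs)
    trimmed[keep] = probs[keep]
    return rng.choice(len(probs), p=trimmed / trimmed.sum())

print(sample([2.0, 1.5, 0.3, -1.0]))  # returns 0 here: after top_k=2, top_p=0.6 keeps only the best token
```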
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1070400b73b939d66b23430bc967605e78c6a2e8998b0244be76825970aca575
+size 5367706660
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f108750978956604628172d94c058651bee690e5ca9904a021bc0671aa8accd5
+size 1706311094
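The two entries above are Git LFS pointers: the repository stores only each shard's sha256 and byte size. A sketch for verifying a downloaded shard against its pointer (the local filename is a placeholder):

```python
# Verify a downloaded shard against the LFS pointer's oid and size.
import hashlib, os

def sha256sum(path, chunk=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

path = "model-00002-of-00002.safetensors"  # placeholder local path
assert os.path.getsize(path) == 1706311094
assert sha256sum(path) == "f108750978956604628172d94c058651bee690e5ca9904a021bc0671aa8accd5"
```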
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,11 @@
+{
+  "size": {"shortest_edge": 12544, "longest_edge": 9633792},
+  "do_rescale": true,
+  "patch_size": 14,
+  "temporal_patch_size": 2,
+  "merge_size": 2,
+  "image_mean": [0.48145466, 0.4578275, 0.40821073],
+  "image_std": [0.26862954, 0.26130258, 0.27577711],
+  "image_processor_type": "Glm46VImageProcessor",
+  "processor_class": "Glm46VProcessor"
+}
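With patch_size 14 and merge_size 2, each 28x28 pixel block maps to one image token; reading shortest_edge/longest_edge as pixel-area bounds (common in this config style) is an interpretation, not something the file states. Rough token arithmetic:

```python
# Token-count arithmetic implied by patch_size=14, merge_size=2:
# one token per (14*2) x (14*2) = 28x28 pixel block.
def image_tokens(height, width, patch=14, merge=2):
    return (height // patch) * (width // patch) // (merge * merge)

print(image_tokens(336, 336))   # 144 tokens for a 336x336 input
print(image_tokens(1344, 896))  # 1536 tokens for a larger image
```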
processor_config.json ADDED
@@ -0,0 +1,66 @@
+{
+  "image_processor": {
+    "data_format": "channels_first",
+    "do_convert_rgb": true,
+    "do_normalize": true,
+    "do_rescale": true,
+    "do_resize": true,
+    "image_mean": [
+      0.48145466,
+      0.4578275,
+      0.40821073
+    ],
+    "image_processor_type": "Glm46VImageProcessorFast",
+    "image_std": [
+      0.26862954,
+      0.26130258,
+      0.27577711
+    ],
+    "merge_size": 2,
+    "patch_size": 14,
+    "resample": 3,
+    "rescale_factor": 0.00392156862745098,
+    "size": {
+      "longest_edge": 9633792,
+      "shortest_edge": 12544
+    },
+    "temporal_patch_size": 2
+  },
+  "processor_class": "Glm46VProcessor",
+  "video_processor": {
+    "data_format": "channels_first",
+    "default_to_square": true,
+    "do_convert_rgb": true,
+    "do_normalize": true,
+    "do_rescale": true,
+    "do_resize": true,
+    "do_sample_frames": true,
+    "fps": 2,
+    "image_mean": [
+      0.48145466,
+      0.4578275,
+      0.40821073
+    ],
+    "image_std": [
+      0.26862954,
+      0.26130258,
+      0.27577711
+    ],
+    "max_duration": 300,
+    "max_image_size": {
+      "longest_edge": 47040000
+    },
+    "merge_size": 2,
+    "num_frames": 16,
+    "patch_size": 14,
+    "resample": 3,
+    "rescale_factor": 0.00392156862745098,
+    "return_metadata": false,
+    "size": {
+      "longest_edge": 100352000,
+      "shortest_edge": 12544
+    },
+    "temporal_patch_size": 2,
+    "video_processor_type": "Glm46VVideoProcessor"
+  }
+}
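The video side samples at fps 2, up to num_frames 16, with clips clamped to max_duration 300 s. A sketch of the frame-index arithmetic those settings suggest; the actual sampler lives inside the video processor and may differ:

```python
# Even frame sampling under fps=2, num_frames=16, max_duration=300 (assumed policy).
def sample_frame_indices(duration_s, source_fps, target_fps=2, max_frames=16, max_duration=300):
    duration_s = min(duration_s, max_duration)
    wanted = min(int(duration_s * target_fps), max_frames)
    total = int(duration_s * source_fps)
    step = max(total // max(wanted, 1), 1)
    return list(range(0, total, step))[:wanted]

print(sample_frame_indices(10, 30))  # 16 evenly spaced indices from a 10 s, 30 fps clip
```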
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bda8e2146c3bb7b7e0fc96dcc4f0aeff041c6c27952e3ace0665663ebff346ba
+size 19970700
tokenizer_config.json ADDED
@@ -0,0 +1,13 @@
+{
+  "backend": "tokenizers",
+  "clean_up_tokenization_spaces": false,
+  "do_lower_case": false,
+  "eos_token": "<|endoftext|>",
+  "is_local": true,
+  "model_max_length": 128000,
+  "pad_token": "<|endoftext|>",
+  "padding_side": "left",
+  "processor_class": "Glm46VProcessor",
+  "remove_space": false,
+  "tokenizer_class": "TokenizersBackend"
+}
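padding_side is left because, in batched generation, right padding would leave pad tokens between a shorter prompt and the tokens being generated. A quick illustration, assuming the repo id loads in transformers:

```python
# Left padding puts pad ids (<|endoftext|>, id 151329 per config.json)
# before shorter prompts, keeping real tokens flush with the generation end.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("animaslabs/GLM-4.6V-Flash-4bit")
batch = tok(["hi", "a much longer prompt"], padding=True)
print(batch["input_ids"][0])  # pad ids lead, prompt ids trail
```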
video_preprocessor_config.json ADDED
@@ -0,0 +1,11 @@
+{
+  "size": {"shortest_edge": 12544, "longest_edge": 100352000},
+  "do_rescale": true,
+  "patch_size": 14,
+  "temporal_patch_size": 2,
+  "merge_size": 2,
+  "image_mean": [0.48145466, 0.4578275, 0.40821073],
+  "image_std": [0.26862954, 0.26130258, 0.27577711],
+  "video_processor_type": "Glm46VVideoProcessor",
+  "processor_class": "Glm46VProcessor"
+}