FastFlowLM committed
Commit 86ea534 · verified · parent 45f63af

Upload initial model

Files changed (10; a download sketch follows this list):
  1. .gitattributes +7 -0
  2. attn.xclbin +3 -0
  3. config.json +41 -0
  4. dequant.xclbin +3 -0
  5. layer.xclbin +3 -0
  6. lm_head.xclbin +3 -0
  7. mm.xclbin +3 -0
  8. model.q4nx +3 -0
  9. tokenizer.json +3 -0
  10. tokenizer_config.json +0 -0
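All ten files can be fetched in one call with huggingface_hub, which resolves the Git LFS pointers below to the actual binaries. A minimal sketch; the repo_id is a placeholder, since this commit page shows the owner (FastFlowLM) but not the repository name:

```python
from huggingface_hub import snapshot_download

# repo_id is a placeholder -- substitute the actual repository name.
local_dir = snapshot_download(
    repo_id="FastFlowLM/<model-name>",
    revision="86ea534",  # pin to the commit shown above
)
print("files downloaded to:", local_dir)
```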
.gitattributes CHANGED
@@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+attn.xclbin filter=lfs diff=lfs merge=lfs -text
+dequant.xclbin filter=lfs diff=lfs merge=lfs -text
+layer.xclbin filter=lfs diff=lfs merge=lfs -text
+lm_head.xclbin filter=lfs diff=lfs merge=lfs -text
+mm.xclbin filter=lfs diff=lfs merge=lfs -text
+model.q4nx filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
attn.xclbin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c7b047ee2300b0614bbb08817a319cc673195b99ceb3ea0a877c3291c2372bf
+size 456971
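Each of the binaries in this commit is stored as the same three-line Git LFS pointer: a spec version, a sha256 oid, and the object size in bytes. A minimal sketch for verifying a downloaded blob against its pointer, assuming the pointer text has been copied from this page:

```python
import hashlib
import re
from pathlib import Path

def verify_lfs_object(blob_path: str, pointer_text: str) -> bool:
    """Check a downloaded file against a Git LFS pointer (size first, then sha256)."""
    oid = re.search(r"oid sha256:([0-9a-f]{64})", pointer_text).group(1)
    size = int(re.search(r"size (\d+)", pointer_text).group(1))
    blob = Path(blob_path)
    if blob.stat().st_size != size:
        return False
    h = hashlib.sha256()
    with blob.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == oid

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:7c7b047ee2300b0614bbb08817a319cc673195b99ceb3ea0a877c3291c2372bf
size 456971"""
print(verify_lfs_object("attn.xclbin", pointer))  # True if the download is intact
```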
config.json ADDED
@@ -0,0 +1,41 @@
+{
+  "architectures": [
+    "Gemma3ForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "attn_logit_softcapping": null,
+  "bos_token_id": 2,
+  "cache_implementation": "hybrid",
+  "eos_token_id": [
+    1,
+    106
+  ],
+  "final_logit_softcapping": null,
+  "head_dim": 256,
+  "hidden_activation": "gelu_pytorch_tanh",
+  "hidden_size": 1152,
+  "initializer_range": 0.02,
+  "intermediate_size": 6912,
+  "max_position_embeddings": 32768,
+  "model_type": "gemma3_text_only",
+  "num_attention_heads": 4,
+  "num_hidden_layers": 26,
+  "num_key_value_heads": 1,
+  "pad_token_id": 0,
+  "query_pre_attn_scalar": 256,
+  "rms_norm_eps": 1e-06,
+  "rope_local_base_freq": 10000,
+  "rope_scaling": null,
+  "rope_theta": 1000000,
+  "sliding_window": 512,
+  "sliding_window_pattern": 6,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.50.0.dev0",
+  "use_cache": true,
+  "vocab_size": 262144,
+  "addr_qk": 9216,
+  "addr_kv": 34816,
+  "addr_kk": 13312,
+  "flm_version": "0.9.4"
+}
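A few of these fields determine the attention layout directly; the addr_* and flm_version entries appear to be FastFlowLM-specific additions rather than standard transformers fields. A minimal sketch deriving the projection widths from the config, assuming config.json is in the working directory:

```python
import json

with open("config.json") as f:
    cfg = json.load(f)

# Query/output width is num_attention_heads * head_dim, which in Gemma-style
# configs is decoupled from hidden_size (1024 vs. 1152 here).
q_width = cfg["num_attention_heads"] * cfg["head_dim"]    # 4 * 256 = 1024
kv_width = cfg["num_key_value_heads"] * cfg["head_dim"]   # 1 * 256 = 256
gqa = cfg["num_attention_heads"] // cfg["num_key_value_heads"]

print(f"q/o projection width: {q_width}")
print(f"k/v projection width: {kv_width}")
print(f"GQA ratio: {gqa} query heads per KV head")
print(f"sliding window: {cfg['sliding_window']} tokens, "
      f"pattern length {cfg['sliding_window_pattern']}")
```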
dequant.xclbin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b529e9c91b0d9def4c5394a21920c0ba01f3d83568dde40f04e4f27034cc714
+size 103915
layer.xclbin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a4f03948734154b299af2c3aae96745f93f6fd976b7ba50bb82ccda2558746a
+size 173707
lm_head.xclbin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14bd1d261343485684abf317c13c6d3eb581c79260437f39d979632b117dadea
+size 143115
mm.xclbin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2962eefe324053f9a3dac247d1ec7f27737a76eeb9e346fc1296ca1269a22ccb
+size 218699
model.q4nx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d85e9e4e0f1ef68c1f51536fdad0fafd48935f1b24cf6db692e1dc0ec6254a40
+size 1229131768
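model.q4nx is the quantized weight blob, about 1.23 GB. A back-of-envelope sketch using only the config.json fields above, assuming a standard Gemma-3-style parameter layout; the q4nx container format itself is not documented here, so the bits-per-parameter figure is an overall on-disk ratio, not a claim about the quantization scheme:

```python
# Rough parameter count for the architecture described in config.json.
hidden, layers = 1152, 26
heads, kv_heads, head_dim = 4, 1, 256
inter, vocab = 6912, 262144

embed = vocab * hidden
attn = 2 * hidden * heads * head_dim + 2 * hidden * kv_heads * head_dim  # q,o + k,v
mlp = 3 * hidden * inter                                                  # gate, up, down
total = embed + layers * (attn + mlp)

file_bytes = 1_229_131_768  # size of model.q4nx from the LFS pointer above
print(f"~{total / 1e9:.2f}B params, {8 * file_bytes / total:.1f} bits/param overall")
# The overall ratio lands well above 4 bits, presumably because some tensors
# (e.g. the large embedding table) are kept at higher precision.
```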
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
+size 33384568
tokenizer_config.json ADDED
The diff for this file is too large to render.