Sumail committed (verified)
Commit dedb0c3 · 1 Parent(s): 07dabca

Upload folder using huggingface_hub

Files changed (5)
  1. config.json +30 -21
  2. merges.txt +0 -0
  3. tokenizer.json +0 -0
  4. tokenizer_config.json +1 -0
  5. vocab.json +0 -0
config.json CHANGED
@@ -1,29 +1,38 @@
 {
-  "_name_or_path": "encrypt_v2",
-  "_attn_implementation": "eager",
+  "_num_labels": 1,
+  "activation_function": "gelu_new",
   "architectures": [
-    "LlamaForCausalLM"
+    "GPT2LMHeadModel"
   ],
-  "attention_bias": true,
-  "attention_dropout": 0.0,
+  "attn_pdrop": 0.1,
   "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
   "eos_token_id": 50256,
-  "hidden_act": "silu",
-  "hidden_size": 2048,
+  "id2label": {
+    "0": "LABEL_0"
+  },
   "initializer_range": 0.02,
-  "intermediate_size": 6112,
-  "max_position_embeddings": 1024,
-  "model_type": "llama",
-  "num_attention_heads": 32,
-  "num_hidden_layers": 12,
-  "num_key_value_heads": 4,
-  "pretraining_tp": 1,
-  "rms_norm_eps": 1e-05,
-  "rope_scaling": null,
-  "rope_theta": 10000.0,
-  "tie_word_embeddings": false,
-  "torch_dtype": "float32",
-  "transformers_version": "4.38.0",
-  "use_cache": false,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_layer": 6,
+  "n_positions": 1024,
+  "resid_pdrop": 0.1,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
   "vocab_size": 50257
 }
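
The net effect of this change is a swap from a 12-layer Llama-style decoder (hidden_size 2048) to a 6-layer GPT-2 architecture (n_embd 768) that keeps the GPT-2 vocabulary size and special-token ids. Below is a minimal sketch, not part of the commit, of instantiating a model from the new values with transformers; the repository id is not shown on this page, so the model is built from the config directly rather than loaded with from_pretrained.

from transformers import GPT2Config, GPT2LMHeadModel

# Values copied from the new config.json in this commit.
config = GPT2Config(
    vocab_size=50257,
    n_positions=1024,
    n_embd=768,
    n_layer=6,
    n_head=12,
    activation_function="gelu_new",
    layer_norm_epsilon=1e-05,
    bos_token_id=50256,
    eos_token_id=50256,
)

# Randomly initialized; the repo's weights would be fetched with
# from_pretrained(<repo_id>), which this page does not name.
model = GPT2LMHeadModel(config)
print(sum(p.numel() for p in model.parameters()))  # ~82M parameters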
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"model_max_length": 1024}
vocab.json ADDED
The diff for this file is too large to render. See raw diff
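
The added vocab.json, merges.txt, and tokenizer.json are the standard GPT-2 byte-level BPE artifacts, and the new tokenizer_config.json caps model_max_length at 1024 to match the model's n_positions/n_ctx. A sketch of how that cap behaves, assuming a local clone of this repository at ./repo (a placeholder path):

from transformers import GPT2TokenizerFast

# "./repo" stands in for a local checkout of this repository.
tokenizer = GPT2TokenizerFast.from_pretrained("./repo")
print(tokenizer.model_max_length)  # 1024, read from tokenizer_config.json

# With truncation enabled, encodings are clipped to model_max_length,
# matching the model's 1024-token context window.
ids = tokenizer("word " * 2000, truncation=True)["input_ids"]
print(len(ids))  # 1024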