lewtun HF Staff commited on
Commit
0525b0f
·
1 Parent(s): c6ab1e0

Add None checkpoint

Browse files
README.md ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ tags:
4
+ - generated_from_trainer
5
+ model-index:
6
+ - name: distilgpt2-ift
7
+ results: []
8
+ ---
9
+
10
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
11
+ should probably proofread and complete it, then remove this comment. -->
12
+
13
+ # distilgpt2-ift
14
+
15
+ This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on an unspecified dataset.
16
+ It achieves the following results on the evaluation set:
17
+ - Loss: 44.2744
18
+
19
+ ## Model description
20
+
21
+ More information needed
22
+
23
+ ## Intended uses & limitations
24
+
25
+ More information needed
26
+
27
+ ## Training and evaluation data
28
+
29
+ More information needed
30
+
31
+ ## Training procedure
32
+
33
+ ### Training hyperparameters
34
+
35
+ The following hyperparameters were used during training:
36
+ - learning_rate: 5e-05
37
+ - train_batch_size: 12
38
+ - eval_batch_size: 12
39
+ - seed: 42
40
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
41
+ - lr_scheduler_type: cosine
42
+ - training_steps: 1
43
+ - mixed_precision_training: Native AMP
44
+
45
+ ### Training results
46
+
47
+ | Training Loss | Epoch | Step | Validation Loss |
48
+ |:-------------:|:-----:|:----:|:---------------:|
49
+ | 41.4703 | 0.0 | 1 | 44.2744 |
50
+
51
+
52
+ ### Framework versions
53
+
54
+ - Transformers 4.28.1
55
+ - Pytorch 2.0.1+cu118
56
+ - Datasets 2.12.0
57
+ - Tokenizers 0.13.3
added_tokens.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "<|assistant|>": 50259,
3
+ "<|end|>": 50260,
4
+ "<|system|>": 50257,
5
+ "<|user|>": 50258
6
+ }
all_results.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.0,
3
+ "eval_loss": 44.2744026184082,
4
+ "eval_runtime": 2.6297,
5
+ "eval_samples": 437,
6
+ "eval_samples_per_second": 166.177,
7
+ "eval_steps_per_second": 14.07,
8
+ "perplexity": 1.6909421376838556e+19,
9
+ "train_loss": 41.470333099365234,
10
+ "train_runtime": 8.8744,
11
+ "train_samples": 4005,
12
+ "train_samples_per_second": 1.352,
13
+ "train_steps_per_second": 0.113
14
+ }
config.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "distilgpt2",
3
+ "_num_labels": 1,
4
+ "activation_function": "gelu_new",
5
+ "architectures": [
6
+ "GPT2LMHeadModel"
7
+ ],
8
+ "attn_pdrop": 0.1,
9
+ "bos_token_id": 50256,
10
+ "embd_pdrop": 0.1,
11
+ "eos_token_id": 50256,
12
+ "id2label": {
13
+ "0": "LABEL_0"
14
+ },
15
+ "initializer_range": 0.02,
16
+ "label2id": {
17
+ "LABEL_0": 0
18
+ },
19
+ "layer_norm_epsilon": 1e-05,
20
+ "model_type": "gpt2",
21
+ "n_ctx": 1024,
22
+ "n_embd": 768,
23
+ "n_head": 12,
24
+ "n_inner": null,
25
+ "n_layer": 6,
26
+ "n_positions": 1024,
27
+ "reorder_and_upcast_attn": false,
28
+ "resid_pdrop": 0.1,
29
+ "scale_attn_by_inverse_layer_idx": false,
30
+ "scale_attn_weights": true,
31
+ "summary_activation": null,
32
+ "summary_first_dropout": 0.1,
33
+ "summary_proj_to_labels": true,
34
+ "summary_type": "cls_index",
35
+ "summary_use_proj": true,
36
+ "task_specific_params": {
37
+ "text-generation": {
38
+ "do_sample": true,
39
+ "max_length": 50
40
+ }
41
+ },
42
+ "torch_dtype": "float32",
43
+ "transformers_version": "4.28.1",
44
+ "use_cache": false,
45
+ "vocab_size": 50261
46
+ }
dialogue_template.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "system": "",
3
+ "messages": null,
4
+ "system_token": "<|system|>",
5
+ "system_format": "standard",
6
+ "user_token": "<|user|>",
7
+ "assistant_token": "<|assistant|>",
8
+ "end_token": "<|end|>",
9
+ "mid_str": "\n",
10
+ "end_str": "\n",
11
+ "extra_end_text": ""
12
+ }
eval_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.0,
3
+ "eval_loss": 44.2744026184082,
4
+ "eval_runtime": 2.6297,
5
+ "eval_samples": 437,
6
+ "eval_samples_per_second": 166.177,
7
+ "eval_steps_per_second": 14.07,
8
+ "perplexity": 1.6909421376838556e+19
9
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 50256,
4
+ "eos_token_id": 50256,
5
+ "transformers_version": "4.28.1"
6
+ }
handler.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from typing import Any, Dict, List

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

from peft import PeftConfig, PeftModel


class EndpointHandler:
    """Inference Endpoints custom handler for a causal-LM checkpoint.

    First tries to load ``path`` as a PEFT adapter checkpoint (base model
    plus adapter weights); if that fails, falls back to loading ``path``
    as a plain ``AutoModelForCausalLM`` checkpoint.
    """

    def __init__(self, path: str = "") -> None:
        # The tokenizer always comes from the checkpoint path itself.
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        try:
            # PEFT route: load the base model named in the adapter config,
            # then attach the adapter weights from `path`.
            config = PeftConfig.from_pretrained(path)
            model = AutoModelForCausalLM.from_pretrained(
                config.base_model_name_or_path,
                return_dict=True,
                load_in_8bit=True,
                device_map="auto",
                torch_dtype=torch.float16,
                trust_remote_code=True,
            )
            # The checkpoint's tokenizer may carry extra special tokens
            # (e.g. <|assistant|>), so the embedding matrix must be resized
            # to the tokenizer's vocabulary before loading the adapter.
            model.resize_token_embeddings(len(self.tokenizer))
            model = PeftModel.from_pretrained(model, path)
        except Exception:
            # NOTE(review): broad fallback — any failure above (including a
            # genuine load error, not just "no adapter config") silently
            # falls through to loading `path` as a full model.
            model = AutoModelForCausalLM.from_pretrained(
                path, device_map="auto", load_in_8bit=True, torch_dtype=torch.float16, trust_remote_code=True
            )
        self.model = model
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, str]]:
        """Generate text for one request payload.

        Args:
            data: request body. ``data["inputs"]`` is the prompt string
                (the whole payload is used if the key is absent) and the
                optional ``data["parameters"]`` dict is forwarded as
                keyword arguments to ``generate``.

        Returns:
            A single-element list ``[{"generated_text": <decoded output>}]``.
        """
        # process input
        inputs = data.pop("inputs", data)
        parameters = data.pop("parameters", None)

        # preprocess: tokenize and move tensors to the model's device
        inputs = self.tokenizer(inputs, return_tensors="pt").to(self.device)

        # pass inputs with all kwargs in data
        if parameters is not None:
            outputs = self.model.generate(**inputs, **parameters)
        else:
            outputs = self.model.generate(**inputs)

        # postprocess: decode the first (and only) generated sequence
        prediction = self.tokenizer.decode(outputs[0], skip_special_tokens=True)

        return [{"generated_text": prediction}]
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:848535d0c25ef70e64ba3f99d2245d3c29a979f3bd1091dbfb4890d4f6833f67
3
+ size 333982457
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ transformers==4.28.1
2
+ accelerate>=0.16.0
3
+ bitsandbytes
4
+ sentencepiece
5
+ git+https://github.com/huggingface/peft.git@632997d1fb776c3cf05d8c2537ac9a98a7ce9435
runs/May30_12-16-44_ip-26-0-150-12/1685449023.6511703/events.out.tfevents.1685449023.ip-26-0-150-12.3309750.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cffdc263be7cc38468995e31f399977941fa03e60a96b930d462cf6934846ebe
3
+ size 6282
runs/May30_12-16-44_ip-26-0-150-12/events.out.tfevents.1685449023.ip-26-0-150-12.3309750.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13c39bc659b4bb330ed78df136c596d1152904b26f01bd51f17070a33aae8b47
3
+ size 5391
runs/May30_12-16-44_ip-26-0-150-12/events.out.tfevents.1685449035.ip-26-0-150-12.3309750.2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc8b768f35e3b2a5fe2663f574e03c376b1f6479850b118e17d5ff834052dfc1
3
+ size 354
special_tokens_map.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|system|>",
4
+ "<|user|>",
5
+ "<|assistant|>",
6
+ "<|end|>"
7
+ ],
8
+ "bos_token": "<|endoftext|>",
9
+ "eos_token": "<|endoftext|>",
10
+ "unk_token": "<|endoftext|>"
11
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "bos_token": "<|endoftext|>",
4
+ "clean_up_tokenization_spaces": true,
5
+ "eos_token": "<|endoftext|>",
6
+ "model_max_length": 1024,
7
+ "tokenizer_class": "GPT2Tokenizer",
8
+ "unk_token": "<|endoftext|>"
9
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.0,
3
+ "train_loss": 41.470333099365234,
4
+ "train_runtime": 8.8744,
5
+ "train_samples": 4005,
6
+ "train_samples_per_second": 1.352,
7
+ "train_steps_per_second": 0.113
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.0029940119760479044,
5
+ "global_step": 1,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.0,
12
+ "learning_rate": 5e-05,
13
+ "loss": 41.4703,
14
+ "step": 1
15
+ },
16
+ {
17
+ "epoch": 0.0,
18
+ "eval_loss": 44.2744026184082,
19
+ "eval_runtime": 2.632,
20
+ "eval_samples_per_second": 166.03,
21
+ "eval_steps_per_second": 14.057,
22
+ "step": 1
23
+ },
24
+ {
25
+ "epoch": 0.0,
26
+ "step": 1,
27
+ "total_flos": 3135561007104.0,
28
+ "train_loss": 41.470333099365234,
29
+ "train_runtime": 8.8744,
30
+ "train_samples_per_second": 1.352,
31
+ "train_steps_per_second": 0.113
32
+ }
33
+ ],
34
+ "max_steps": 1,
35
+ "num_train_epochs": 1,
36
+ "total_flos": 3135561007104.0,
37
+ "trial_name": null,
38
+ "trial_params": null
39
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6f90f76c79c744e69aec7779d113507e23a9938df4c4b4680312f5ec4444f700
3
+ size 3899
vocab.json ADDED
The diff for this file is too large to render. See raw diff