pszemraj and SFconvertbot committed
Commit c9ad5ea · verified · 0 parent(s)

Super-squash branch 'main' using huggingface_hub


Co-authored-by: SFconvertbot <SFconvertbot@users.noreply.huggingface.co>

.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
README.md ADDED
@@ -0,0 +1,101 @@
+ ---
+ license: other
+ tags:
+ - generated_from_trainer
+ - text generation
+ - stable diffusion
+ - midjourney
+ - text2image
+ - text to image
+ - prompt augment
+ - prompt engineering
+ thumbnail: https://i.imgur.com/DeKNHtC.jpg
+ datasets:
+ - pszemraj/text2image-multi-prompt
+ widget:
+ - text: "morning sun over Jakarta"
+   example_title: "morning sun"
+ - text: "WARNING: pip is"
+   example_title: "pip"
+ - text: "sentient cheese"
+   example_title: "sentient cheese"
+ - text: "cheeps are"
+   example_title: "cheeps"
+ - text: "avocado armchair"
+   example_title: "creative prompt"
+ - text: "Landscape of"
+   example_title: "landscape"
+ parameters:
+   min_length: 16
+   max_length: 96
+   no_repeat_ngram_size: 1
+   do_sample: true
+ ---
+
+ # pszemraj/opt-350m-multiprompt
+
+ <a href="https://colab.research.google.com/gist/pszemraj/bdd1238ee4b8330aeec6774a16f9a677/opt-350m-multiprompt-demo.ipynb">
+   <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
+ </a>
+
+ Generate or augment your text2image prompt with a model trained on a large, diverse prompt dataset.
+
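+ A minimal usage sketch with the `transformers` pipeline (the sampling values mirror the widget parameters above; adjust to taste):
+
+ ```python
+ from transformers import pipeline
+
+ # load the fine-tuned prompt generator
+ generator = pipeline("text-generation", model="pszemraj/opt-350m-multiprompt")
+
+ # expand a short seed phrase into a full text2image prompt
+ result = generator(
+     "morning sun over Jakarta",
+     min_length=16,
+     max_length=96,
+     no_repeat_ngram_size=1,
+     do_sample=True,
+ )
+ print(result[0]["generated_text"])
+ ```
+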
+ This model is a fine-tuned version of [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) on the pszemraj/text2image-multi-prompt dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.6669
+ - eval steps per second: 16.21
+ - perplexity: 5.29
+
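+ (The reported perplexity is just the exponentiated eval loss: exp(1.6669) ≈ 5.29.)
+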
+ ## Example
+
+ ![landscape of florida](https://i.imgur.com/DeKNHtC.jpg)
+
+ <br>
+
+ _The example image above was rendered with [DALL-E 2](https://labs.openai.com/sc/YbiY2kkuQeODzHNwUHn4D5RN), but the generated prompts will of course work with any text2image model._
+
+ ## Intended uses & limitations
+
+ - The model generates augmentations biased towards its training data, i.e. the kinds of prompts people have already asked for in the Stable Diffusion/Midjourney discords and similar communities. Compiling a larger dataset from several sources was an attempt to mitigate this.
+
+ ## Training and evaluation data
+
+ See the `pszemraj/text2image-multi-prompt` dataset card for details. The dataset is a compilation of several text-to-image prompt datasets on the Hugging Face hub :)
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (see the `TrainingArguments` sketch after the list):
+ - learning_rate: 0.0002
+ - train_batch_size: 8
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 2
+ - gradient_accumulation_steps: 16
+ - total_train_batch_size: 256
+ - total_eval_batch_size: 8
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.04
+ - num_epochs: 4.0
+
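+ The list above maps onto `transformers.TrainingArguments` roughly as below. This is a reconstruction, not the exact launch config: `output_dir` is a placeholder, and the two-GPU setup comes from the launcher rather than these arguments.
+
+ ```python
+ from transformers import TrainingArguments
+
+ args = TrainingArguments(
+     output_dir="opt-350m-multiprompt",  # placeholder path
+     learning_rate=2e-4,
+     per_device_train_batch_size=8,
+     per_device_eval_batch_size=4,
+     gradient_accumulation_steps=16,
+     num_train_epochs=4.0,
+     lr_scheduler_type="cosine",
+     warmup_ratio=0.04,
+     seed=42,
+ )
+ # effective train batch size: 8 per device x 2 GPUs x 16 accumulation steps = 256
+ ```
+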
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 2.1677 | 1.0 | 990 | 2.0888 |
+ | 1.856 | 2.0 | 1980 | 1.8215 |
+ | 1.6864 | 3.0 | 2970 | 1.6935 |
+ | 1.6228 | 4.0 | 3960 | 1.6670 |
+
+ ### Framework versions
+
+ - Transformers 4.25.0.dev0
+ - Pytorch 1.13.0+cu117
+ - Datasets 2.6.1
+ - Tokenizers 0.13.1
all_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+     "epoch": 4.0,
+     "eval_loss": 1.6668897867202759,
+     "eval_runtime": 102.7014,
+     "eval_samples": 13319,
+     "eval_samples_per_second": 129.687,
+     "eval_steps_per_second": 16.212,
+     "perplexity": 5.295671489170355,
+     "train_loss": 1.9931427570304485,
+     "train_runtime": 30194.781,
+     "train_samples": 253694,
+     "train_samples_per_second": 33.608,
+     "train_steps_per_second": 0.131
+ }
config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "facebook/opt-350m",
+   "_remove_final_layer_norm": false,
+   "activation_dropout": 0.0,
+   "activation_function": "relu",
+   "architectures": [
+     "OPTForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "do_layer_norm_before": false,
+   "dropout": 0.1,
+   "eos_token_id": 2,
+   "ffn_dim": 4096,
+   "hidden_size": 1024,
+   "init_std": 0.02,
+   "layerdrop": 0.0,
+   "max_position_embeddings": 2048,
+   "model_type": "opt",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 1,
+   "prefix": "</s>",
+   "transformers_version": "4.25.0.dev0",
+   "use_cache": true,
+   "vocab_size": 50272,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "no_repeat_ngram_size": 2,
+       "repetition_penalty": 4.5,
+       "max_length": 48
+     }
+   },
+   "word_embed_proj_dim": 512
+ }
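The `task_specific_params` block above supplies the default sampling settings used for text generation with this model. A quick way to inspect them (a sketch, assuming `transformers` is installed):

```python
from transformers import AutoConfig

# pull just the config; no weights are downloaded
config = AutoConfig.from_pretrained("pszemraj/opt-350m-multiprompt")
print(config.task_specific_params["text-generation"])
# {'do_sample': True, 'no_repeat_ngram_size': 2, 'repetition_penalty': 4.5, 'max_length': 48}
```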
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 4.0,
+     "eval_loss": 1.6668897867202759,
+     "eval_runtime": 102.7014,
+     "eval_samples": 13319,
+     "eval_samples_per_second": 129.687,
+     "eval_steps_per_second": 16.212,
+     "perplexity": 5.295671489170355
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f57c49cdb01dc9557f4455094d76c12c2ec4030d8aed0784501af7b7f0b611f5
+ size 662438164
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:743eb512852251be08a75f53fcbdc54da6810c302f91b9f948513d6064ebc538
+ size 662524445
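The three-line stanzas above are Git LFS pointer files: the repository itself stores only the `version`, `oid`, and `size` fields, while the ~662 MB weight payloads live in LFS storage.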
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "add_bos_token": true,
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "name_or_path": "facebook/opt-350m",
+   "pad_token": {
+     "__type": "AddedToken",
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "special_tokens_map_file": null,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 4.0,
+     "train_loss": 1.9931427570304485,
+     "train_runtime": 30194.781,
+     "train_samples": 253694,
+     "train_samples_per_second": 33.608,
+     "train_steps_per_second": 0.131
+ }
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:017f65ad7bf3b0edcc77f94ff61fa646a524820ede2792c36a5ba61057311c70
+ size 3387
vocab.json ADDED
The diff for this file is too large to render. See raw diff