chenxingli committed
Commit d062a5e · 1 Parent(s): ce3474d

Upload my pretrained model

STA-V2A/best/optimizer.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5ed7e5684512bddd3071d4bac9aa772f2c328b46a969ee92d01b2b10ec599aa
+ size 2193962117
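
Note: the `.bin` and `.pkl` entries in this commit are Git LFS pointer files rather than the weights themselves; each pointer records the LFS spec version, the SHA-256 of the actual blob, and its size in bytes (`git lfs pull` fetches the real content). A minimal sketch for checking a downloaded blob against its pointer — the helper names and file paths are illustrative, not part of this repo:

```python
import hashlib

def read_lfs_pointer(pointer_path):
    """Parse a Git LFS pointer file into a dict of its key/value fields."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

def verify_blob(pointer_path, blob_path):
    """Check a downloaded blob's SHA-256 and size against its LFS pointer."""
    fields = read_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].split(":", 1)[1]   # strip the "sha256:" prefix
    expected_size = int(fields["size"])
    digest, size = hashlib.sha256(), 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# e.g. verify_blob("STA-V2A/best/optimizer.bin", "/path/to/downloaded/optimizer.bin")
```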
STA-V2A/best/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c068ef545f11557271908557c3edf0dba6dfe87ddb37b7f5014f8bf87c43cfa
+ size 442714114
STA-V2A/best/pytorch_model_1.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba6d10e4a565f922395be2e8a9e872a5342005055ebaf64963d195553e05ae71
+ size 8538252
STA-V2A/best/pytorch_model_2.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94e6d440140258253b183dfee25bafacfb2712f58638680acc9f3730059cd9ce
+ size 3808963356
STA-V2A/best/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1adb68ee3baf1a31d35e3864013abca08ad8f2ff5cf272968fef97fcfda34db0
+ size 15580
STA-V2A/best/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd73e2d56fec237d53bae1e2e58c3988365819ae34f2d633cd5944c172ab998b
+ size 1000
STA-V2A/summary.jsonl ADDED
@@ -0,0 +1 @@
+ {"train_file": "", "validation_file": "", "test_file": "", "num_examples": -1, "text_encoder_name": "./mt5-large", "scheduler_name": "stabilityai/stable-diffusion-2-1", "unet_model_name": "./saved/unet/pretrain_ft", "unet_model_config": null, "hf_model": null, "snr_gamma": 5.0, "freeze_text_encoder": true, "text_column": "caption", "audio_column": "audio_location", "video_column": "frame_pt", "feature_column": "cavp_feature_location", "augment": false, "uncondition": true, "prefix": null, "per_device_train_batch_size": 20, "per_device_eval_batch_size": 20, "learning_rate": 3e-05, "weight_decay": 1e-08, "num_train_epochs": 40, "max_train_steps": null, "gradient_accumulation_steps": 2, "lr_scheduler_type": "linear", "num_warmup_steps": 0, "adam_beta1": 0.9, "adam_beta2": 0.999, "adam_weight_decay": 0.01, "adam_epsilon": 1e-08, "output_dir": "saved/STA-V2A", "seed": null, "checkpointing_steps": "best", "save_every": 1, "resume_from_checkpoint": null, "resume_diff_from_checkpoint": null, "vae_model": "audioldm-s-full", "sample_rate": 16000, "with_tracking": false, "var_len": false, "report_to": "all", "video_fps": 40, "fraze_unet": false, "predict_onset_model": null, "has_global_video_feature": true, "use_feature_window": true, "guidance_free_rate": 0.2, "Onset_weight": 1.0}
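
Note: `summary.jsonl` holds one JSON object per line recording the training run's hyperparameters shown above (text encoder, UNet path, batch size, learning rate, and so on). A minimal sketch for inspecting it, assuming the file sits at the path shown in this commit:

```python
import json

# Each line of a .jsonl file is a standalone JSON object.
with open("STA-V2A/summary.jsonl") as f:
    for line in f:
        run = json.loads(line)
        print(run["text_encoder_name"], run["learning_rate"], run["num_train_epochs"])
```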
pretrained_model/audioldm-s-full.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f8d1923410622be823279b61967d27a2df3fd03ddd764afb298e7c20ef8877d
+ size 2558947469
pretrained_model/clap/clap-htsat-unfused/README.md ADDED
@@ -0,0 +1,107 @@
+ ---
+ license: apache-2.0
+ ---
+ # Model card for CLAP
+
+ Model card for CLAP: Contrastive Language-Audio Pretraining
+
+ ![clap_image](https://s3.amazonaws.com/moonup/production/uploads/1678811100805-62441d1d9fdefb55a0b7d12c.png)
+
+
+ # Table of Contents
+
+ 1. [TL;DR](#tldr)
+ 2. [Usage](#usage)
+ 3. [Uses](#uses)
+ 4. [Citation](#citation)
+
+ # TL;DR
+
+ The abstract of the paper states:
+
+ > Contrastive learning has shown remarkable success in the field of multimodal representation learning. In this paper, we propose a pipeline of contrastive language-audio pretraining to develop an audio representation by combining audio data with natural language descriptions. To accomplish this target, we first release LAION-Audio-630K, a large collection of 633,526 audio-text pairs from different data sources. Second, we construct a contrastive language-audio pretraining model by considering different audio encoders and text encoders. We incorporate the feature fusion mechanism and keyword-to-caption augmentation into the model design to further enable the model to process audio inputs of variable lengths and enhance the performance. Third, we perform comprehensive experiments to evaluate our model across three tasks: text-to-audio retrieval, zero-shot audio classification, and supervised audio classification. The results demonstrate that our model achieves superior performance in text-to-audio retrieval task. In audio classification tasks, the model achieves state-of-the-art performance in the zero-shot setting and is able to obtain performance comparable to models' results in the non-zero-shot setting. LAION-Audio-630K and the proposed model are both available to the public.
+
+
+ # Usage
+
+ You can use this model for zero-shot audio classification or for extracting audio and/or text features.
+
+ # Uses
+
+ ## Perform zero-shot audio classification
+
+ ### Using `pipeline`
+
+ ```python
+ from datasets import load_dataset
+ from transformers import pipeline
+
+ dataset = load_dataset("ashraq/esc50")
+ audio = dataset["train"]["audio"][-1]["array"]
+
+ audio_classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
+ output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of a vacuum cleaner"])
+ print(output)
+ >>> [{"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of a vacuum cleaner"}]
+ ```
+
+ ## Run the model
+
+ You can also get the audio and text embeddings using `ClapModel`.
+
+ ### Run the model on CPU
+
+ ```python
+ from datasets import load_dataset
+ from transformers import ClapModel, ClapProcessor
+
+ librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ audio_sample = librispeech_dummy[0]
+
+ model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
+ processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
+
+ inputs = processor(audios=audio_sample["audio"]["array"], return_tensors="pt")
+ audio_embed = model.get_audio_features(**inputs)
+ ```
+
+ ### Run the model on GPU
+
+ ```python
+ from datasets import load_dataset
+ from transformers import ClapModel, ClapProcessor
+
+ librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ audio_sample = librispeech_dummy[0]
+
+ model = ClapModel.from_pretrained("laion/clap-htsat-unfused").to(0)
+ processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
+
+ inputs = processor(audios=audio_sample["audio"]["array"], return_tensors="pt").to(0)
+ audio_embed = model.get_audio_features(**inputs)
+ ```
+
+
+ # Citation
+
+ If you are using this model for your work, please consider citing the original paper:
+ ```
+ @misc{https://doi.org/10.48550/arxiv.2211.06687,
+   doi = {10.48550/ARXIV.2211.06687},
+   url = {https://arxiv.org/abs/2211.06687},
+   author = {Wu, Yusong and Chen, Ke and Zhang, Tianyu and Hui, Yuchen and Berg-Kirkpatrick, Taylor and Dubnov, Shlomo},
+   keywords = {Sound (cs.SD), Audio and Speech Processing (eess.AS), FOS: Computer and information sciences, FOS: Electrical engineering, electronic engineering, information engineering},
+   title = {Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation},
+   publisher = {arXiv},
+   year = {2022},
+   copyright = {Creative Commons Attribution 4.0 International}
+ }
+ ```
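
Note: the CPU/GPU snippets in this card extract audio embeddings; text embeddings work the same way through `ClapModel.get_text_features`, which is the standard counterpart in `transformers`. A minimal sketch, using the same `laion/clap-htsat-unfused` checkpoint; the candidate labels are illustrative:

```python
from transformers import ClapModel, ClapProcessor

model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")

texts = ["Sound of a dog", "Sound of a vacuum cleaner"]
inputs = processor(text=texts, return_tensors="pt", padding=True)
text_embed = model.get_text_features(**inputs)  # one embedding per label
```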
pretrained_model/clap/laion_clap/README.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ license: cc0-1.0
+ ---
pretrained_model/clap/msclap/README.md ADDED
@@ -0,0 +1,110 @@
+ ---
+ license: ms-pl
+ tags:
+ - contrastive audio language pretraining
+ - audio
+ - music
+ - emotion
+ - sound events
+ - bioacoustics
+ - retrieval
+ - captioning
+ - zero-shot
+ - audio-text
+ - CLAP
+ ---
+
+ ###### [Overview](#CLAP) | [Setup](#Setup) | [CLAP weights](#CLAP-weights) | [Usage](#Usage) | [Examples](#Examples) | [Citation](#Citation)
+
+ # CLAP
+
+ CLAP (Contrastive Language-Audio Pretraining) is a model that learns acoustic concepts from natural language supervision and enables “Zero-Shot” inference. The model has been extensively evaluated on 26 downstream audio tasks, achieving SoTA in several of them, including classification, retrieval, and captioning.
+
+ <img width="832" alt="clap_diagrams" src="docs/clap2_diagram.png">
+
+ ## Setup
+
+ First, install Python 3.8 or higher (3.11 recommended). Then, install CLAP using either of the following:
+
+ ```shell
+ # Install the PyPI package
+ pip install msclap
+
+ # Or install the latest (unstable) git source
+ pip install git+https://github.com/microsoft/CLAP.git
+ ```
+
+ ## NEW CLAP weights
+ CLAP weights: versions _2022_, _2023_, and _clapcap_
+
+ _clapcap_ is the audio captioning model that uses the 2023 encoders.
+
+ ## Usage
+
+ The CLAP code lives at https://github.com/microsoft/CLAP.
+
+ - Zero-Shot Classification and Retrieval
+ ```python
+ from msclap import CLAP
+
+ # Load model (choose between versions '2022' or '2023')
+ clap_model = CLAP("<PATH TO WEIGHTS>", version='2023', use_cuda=False)
+
+ # Extract text embeddings for a list of class labels
+ class_labels = ["dog barking", "vacuum cleaner", "rain"]
+ text_embeddings = clap_model.get_text_embeddings(class_labels)
+
+ # Extract audio embeddings for a list of audio files
+ file_paths = ["audio_1.wav", "audio_2.wav"]
+ audio_embeddings = clap_model.get_audio_embeddings(file_paths)
+
+ # Compute similarity between audio and text embeddings
+ similarities = clap_model.compute_similarity(audio_embeddings, text_embeddings)
+ ```
+
+ - Audio Captioning
+ ```python
+ from msclap import CLAP
+
+ # Load model (choose version 'clapcap')
+ clap_model = CLAP("<PATH TO WEIGHTS>", version='clapcap', use_cuda=False)
+
+ # Generate captions for a list of audio files
+ file_paths = ["audio_1.wav", "audio_2.wav"]
+ captions = clap_model.generate_caption(file_paths)
+ ```
+
+
+ ## Citation
+
+ Kindly cite our work if you find it useful.
+
+ [CLAP: Learning Audio Concepts from Natural Language Supervision](https://ieeexplore.ieee.org/abstract/document/10095889)
+ ```
+ @inproceedings{CLAP2022,
+   title={Clap learning audio concepts from natural language supervision},
+   author={Elizalde, Benjamin and Deshmukh, Soham and Al Ismail, Mahmoud and Wang, Huaming},
+   booktitle={ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
+   pages={1--5},
+   year={2023},
+   organization={IEEE}
+ }
+ ```
+
+ [Natural Language Supervision for General-Purpose Audio Representations](https://arxiv.org/abs/2309.05767)
+ ```
+ @misc{CLAP2023,
+   title={Natural Language Supervision for General-Purpose Audio Representations},
+   author={Benjamin Elizalde and Soham Deshmukh and Huaming Wang},
+   year={2023},
+   eprint={2309.05767},
+   archivePrefix={arXiv},
+   primaryClass={cs.SD},
+   url={https://arxiv.org/abs/2309.05767}
+ }
+ ```
+
+ ## Trademarks
+
+ This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft
+ trademarks or logos is subject to and must follow
+ [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general).
+ Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship.
+ Any use of third-party trademarks or logos is subject to those third parties' policies.
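
Note: in the retrieval example above, `compute_similarity` returns a matrix of audio-vs-text similarity logits. A minimal sketch of turning it into zero-shot class probabilities, assuming (as in msclap's own zero-shot example) one row per audio file and one column per class label:

```python
import torch.nn.functional as F

# One probability distribution over class_labels per audio file
probs = F.softmax(similarities, dim=1)
for path, p in zip(file_paths, probs):
    best = p.argmax().item()
    print(f"{path}: {class_labels[best]} ({p[best]:.2f})")
```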
pretrained_model/gpt2/README.md ADDED
@@ -0,0 +1,167 @@
+ ---
+ language: en
+ tags:
+ - exbert
+
+ license: mit
+ ---
+
+
+ # GPT-2
+
+ Test the whole generation capabilities here: https://transformer.huggingface.co/doc/gpt2-large
+
+ Pretrained model on the English language using a causal language modeling (CLM) objective. It was introduced in
+ [this paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf)
+ and first released at [this page](https://openai.com/blog/better-language-models/).
+
+ Disclaimer: The team releasing GPT-2 also wrote a
+ [model card](https://github.com/openai/gpt-2/blob/master/model_card.md) for their model. Content from this model card
+ has been written by the Hugging Face team to complete the information they provided and give specific examples of bias.
+
+ ## Model description
+
+ GPT-2 is a transformers model pretrained on a very large corpus of English data in a self-supervised fashion. This
+ means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots
+ of publicly available data), with an automatic process to generate inputs and labels from those texts. More precisely,
+ it was trained to guess the next word in sentences.
+
+ More concretely, inputs are sequences of continuous text of a certain length, and the targets are the same sequence
+ shifted one token (a word or piece of a word) to the right. The model internally uses a masking mechanism to make sure
+ the predictions for token `i` only use the inputs from `1` to `i`, not the future tokens.
+
+ This way, the model learns an inner representation of the English language that can then be used to extract features
+ useful for downstream tasks. However, the model is best at what it was pretrained for, which is generating texts from
+ a prompt.
+
+ This is the **smallest** version of GPT-2, with 124M parameters.
+
+ **Related Models:** [GPT-Large](https://huggingface.co/gpt2-large), [GPT-Medium](https://huggingface.co/gpt2-medium) and [GPT-XL](https://huggingface.co/gpt2-xl)
+
+ ## Intended uses & limitations
+
+ You can use the raw model for text generation or fine-tune it on a downstream task. See the
+ [model hub](https://huggingface.co/models?filter=gpt2) to look for fine-tuned versions on a task that interests you.
+
+ ### How to use
+
+ You can use this model directly with a pipeline for text generation. Since the generation relies on some randomness, we
+ set a seed for reproducibility:
+
+ ```python
+ >>> from transformers import pipeline, set_seed
+ >>> generator = pipeline('text-generation', model='gpt2')
+ >>> set_seed(42)
+ >>> generator("Hello, I'm a language model,", max_length=30, num_return_sequences=5)
+
+ [{'generated_text': "Hello, I'm a language model, a language for thinking, a language for expressing thoughts."},
+ {'generated_text': "Hello, I'm a language model, a compiler, a compiler library, I just want to know how I build this kind of stuff. I don"},
+ {'generated_text': "Hello, I'm a language model, and also have more than a few of your own, but I understand that they're going to need some help"},
+ {'generated_text': "Hello, I'm a language model, a system model. I want to know my language so that it might be more interesting, more user-friendly"},
+ {'generated_text': 'Hello, I\'m a language model, not a language model"\n\nThe concept of "no-tricks" comes in handy later with new'}]
+ ```
+
+ Here is how to use this model to get the features of a given text in PyTorch:
+
+ ```python
+ from transformers import GPT2Tokenizer, GPT2Model
+ tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+ model = GPT2Model.from_pretrained('gpt2')
+ text = "Replace me by any text you'd like."
+ encoded_input = tokenizer(text, return_tensors='pt')
+ output = model(**encoded_input)
+ ```
+
+ and in TensorFlow:
+
+ ```python
+ from transformers import GPT2Tokenizer, TFGPT2Model
+ tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+ model = TFGPT2Model.from_pretrained('gpt2')
+ text = "Replace me by any text you'd like."
+ encoded_input = tokenizer(text, return_tensors='tf')
+ output = model(encoded_input)
+ ```
+
+ ### Limitations and bias
+
+ The training data used for this model has not been released as a dataset one can browse. We know it contains a lot of
+ unfiltered content from the internet, which is far from neutral. As the OpenAI team themselves point out in their
+ [model card](https://github.com/openai/gpt-2/blob/master/model_card.md#out-of-scope-use-cases):
+
+ > Because large-scale language models like GPT-2 do not distinguish fact from fiction, we don’t support use-cases
+ > that require the generated text to be true.
+ >
+ > Additionally, language models like GPT-2 reflect the biases inherent to the systems they were trained on, so we do
+ > not recommend that they be deployed into systems that interact with humans unless the deployers first carry out a
+ > study of biases relevant to the intended use-case. We found no statistically significant difference in gender, race,
+ > and religious bias probes between 774M and 1.5B, implying all versions of GPT-2 should be approached with similar
+ > levels of caution around use cases that are sensitive to biases around human attributes.
+
+ Here's an example of how the model can have biased predictions:
+
+ ```python
+ >>> from transformers import pipeline, set_seed
+ >>> generator = pipeline('text-generation', model='gpt2')
+ >>> set_seed(42)
+ >>> generator("The White man worked as a", max_length=10, num_return_sequences=5)
+
+ [{'generated_text': 'The White man worked as a mannequin for'},
+ {'generated_text': 'The White man worked as a maniser of the'},
+ {'generated_text': 'The White man worked as a bus conductor by day'},
+ {'generated_text': 'The White man worked as a plumber at the'},
+ {'generated_text': 'The White man worked as a journalist. He had'}]
+
+ >>> set_seed(42)
+ >>> generator("The Black man worked as a", max_length=10, num_return_sequences=5)
+
+ [{'generated_text': 'The Black man worked as a man at a restaurant'},
+ {'generated_text': 'The Black man worked as a car salesman in a'},
+ {'generated_text': 'The Black man worked as a police sergeant at the'},
+ {'generated_text': 'The Black man worked as a man-eating monster'},
+ {'generated_text': 'The Black man worked as a slave, and was'}]
+ ```
+
+ This bias will also affect all fine-tuned versions of this model.
+
+ ## Training data
+
+ The OpenAI team wanted to train this model on a corpus as large as possible. To build it, they scraped all the web
+ pages from outbound links on Reddit which received at least 3 karma. Note that all Wikipedia pages were removed from
+ this dataset, so the model was not trained on any part of Wikipedia. The resulting dataset (called WebText) weighs
+ 40GB of text but has not been publicly released. You can find a list of the top 1,000 domains present in WebText
+ [here](https://github.com/openai/gpt-2/blob/master/domains.txt).
+
+ ## Training procedure
+
+ ### Preprocessing
+
+ The texts are tokenized using a byte-level version of Byte Pair Encoding (BPE) (for unicode characters) and a
+ vocabulary size of 50,257. The inputs are sequences of 1024 consecutive tokens.
+
+ The larger model was trained on 256 cloud TPU v3 cores. The training duration was not disclosed, nor were the exact
+ details of training.
+
+ ## Evaluation results
+
+ The model achieves the following results without any fine-tuning (zero-shot):
+
+ | Dataset  | LAMBADA | LAMBADA | CBT-CN | CBT-NE | WikiText2 | PTB    | enwiki8 | text8  | WikiText103 | 1BW   |
+ |:--------:|:-------:|:-------:|:------:|:------:|:---------:|:------:|:-------:|:------:|:-----------:|:-----:|
+ | (metric) | (PPL)   | (ACC)   | (ACC)  | (ACC)  | (PPL)     | (PPL)  | (BPB)   | (BPC)  | (PPL)       | (PPL) |
+ |          | 35.13   | 45.99   | 87.65  | 83.4   | 29.41     | 65.85  | 1.16    | 1.17   | 37.50       | 75.20 |
+
+
+ ### BibTeX entry and citation info
+
+ ```bibtex
+ @article{radford2019language,
+   title={Language Models are Unsupervised Multitask Learners},
+   author={Radford, Alec and Wu, Jeff and Child, Rewon and Luan, David and Amodei, Dario and Sutskever, Ilya},
+   year={2019}
+ }
+ ```
+
+ <a href="https://huggingface.co/exbert/?model=gpt2">
+ <img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png">
+ </a>
unet/pretrain_ft/config.json ADDED
@@ -0,0 +1,65 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_diffusers_version": "0.20.0",
+   "act_fn": "silu",
+   "addition_embed_type": null,
+   "addition_embed_type_num_heads": 64,
+   "addition_time_embed_dim": null,
+   "attention_head_dim": 8,
+   "attention_type": "default",
+   "block_out_channels": [
+     128,
+     256,
+     384,
+     640
+   ],
+   "center_input_sample": false,
+   "class_embed_type": null,
+   "class_embeddings_concat": false,
+   "conv_in_kernel": 3,
+   "conv_out_kernel": 3,
+   "cross_attention_dim": 1024,
+   "cross_attention_norm": null,
+   "down_block_types": [
+     "DownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dual_cross_attention": false,
+   "encoder_hid_dim": null,
+   "encoder_hid_dim_type": null,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 8,
+   "layers_per_block": 2,
+   "mid_block_only_cross_attention": null,
+   "mid_block_scale_factor": 1,
+   "mid_block_type": "UNetMidBlock2DCrossAttn",
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "num_attention_heads": null,
+   "num_class_embeds": null,
+   "only_cross_attention": false,
+   "out_channels": 8,
+   "projection_class_embeddings_input_dim": null,
+   "resnet_out_scale_factor": 1.0,
+   "resnet_skip_time_act": false,
+   "resnet_time_scale_shift": "default",
+   "sample_size": 128,
+   "time_cond_proj_dim": null,
+   "time_embedding_act_fn": null,
+   "time_embedding_dim": null,
+   "time_embedding_type": "positional",
+   "timestep_post_act": null,
+   "transformer_layers_per_block": 1,
+   "up_block_types": [
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "UpBlock2D"
+   ],
+   "upcast_attention": false,
+   "use_linear_projection": false
+ }
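
Note: this `config.json` plus the `diffusion_pytorch_model.bin` added below follow the standard diffusers folder layout, so the UNet can be instantiated straight from the directory. A minimal sketch, assuming the repo is checked out locally with LFS files pulled:

```python
import torch
from diffusers import UNet2DConditionModel

# Architecture comes from config.json, weights from diffusion_pytorch_model.bin
unet = UNet2DConditionModel.from_pretrained("unet/pretrain_ft")

# Sanity check: 8 latent channels in/out and cross_attention_dim=1024 per the config
latents = torch.randn(1, 8, 128, 128)
timestep = torch.tensor([10])
text_states = torch.randn(1, 77, 1024)  # dummy conditioning; sequence length is arbitrary
out = unet(latents, timestep, encoder_hidden_states=text_states).sample
print(out.shape)  # torch.Size([1, 8, 128, 128])
```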
unet/pretrain_ft/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6aad8cdb14c7ff74a72ab774e3ce23f19f6337fa22fabda4068aff7f99a35ac
+ size 751830529