Serialized with:

```python
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection


def get_dummy_components():
    clip_text_encoder_config = CLIPTextConfig(
        bos_token_id=0,
        eos_token_id=2,
        hidden_size=32,
        intermediate_size=37,
        layer_norm_eps=1e-05,
        num_attention_heads=4,
        num_hidden_layers=5,
        pad_token_id=1,
        vocab_size=1000,
        hidden_act="gelu",
        projection_dim=32,
    )

    torch.manual_seed(0)
    text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config)

    torch.manual_seed(0)
    text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config)

    return text_encoder, text_encoder_2


text_encoder, text_encoder_2 = get_dummy_components()
text_encoder.push_to_hub("hf-internal-testing/tiny-sd3-text_encoder")
text_encoder_2.push_to_hub("hf-internal-testing/tiny-sd3-text_encoder-2")
```