Dataset columns:
- modelId: string (lengths 4 to 81)
- tags: list
- pipeline_tag: string (17 classes)
- config: dict
- downloads: int64 (0 to 59.7M)
- first_commit: timestamp[ns, tz=UTC]
- card: string (lengths 51 to 438k)
AvatarXD/DialoGPT-medium-Blitzo
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
2023-04-21T14:08:25Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - clinc_oos metrics: - accuracy model-index: - name: distilbert-base-uncased-distilled-clinc results: - task: name: Text Classification type: text-classification dataset: name: clinc_oos type: clinc_oos config: plus split: validation args: plus metrics: - name: Accuracy type: accuracy value: 0.9483870967741935 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-distilled-clinc This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the clinc_oos dataset. It achieves the following results on the evaluation set: - Loss: 0.2141 - Accuracy: 0.9484 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.4176 | 1.0 | 1907 | 0.7492 | 0.8610 | | 0.336 | 2.0 | 3814 | 0.2997 | 0.9368 | | 0.174 | 3.0 | 5721 | 0.2329 | 0.9468 | | 0.122 | 4.0 | 7628 | 0.2155 | 0.9484 | | 0.1068 | 5.0 | 9535 | 0.2141 | 0.9484 | ### Framework versions - Transformers 4.28.1 - Pytorch 1.11.0+cu113 - Datasets 2.11.0 - Tokenizers 0.13.3
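The card above stops at the framework versions and gives no usage snippet. A minimal inference sketch with the `transformers` pipeline; the repo id is a placeholder, since the card does not state where the checkpoint is hosted:

```python
from transformers import pipeline

# Placeholder repo id; substitute the actual Hub location of this checkpoint.
classifier = pipeline(
    "text-classification",
    model="<user>/distilbert-base-uncased-distilled-clinc",
)

# clinc_oos is an intent-classification dataset (150 intents plus an out-of-scope class).
print(classifier("how do i transfer money between my accounts"))
```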
Aviora/news2vec
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-21T14:09:18Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: detr-model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # detr-model This model is a fine-tuned version of [facebook/detr-resnet-50](https://huggingface.co/facebook/detr-resnet-50) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.5768 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
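As with the previous card, no inference example is given. A minimal object-detection sketch for this DETR fine-tune, again with a placeholder repo id:

```python
from transformers import pipeline

# Placeholder repo id; substitute the actual Hub location of the fine-tuned model.
detector = pipeline("object-detection", model="<user>/detr-model")

# Accepts a local file path, an image URL, or a PIL image.
for detection in detector("example.jpg"):
    print(detection["label"], round(detection["score"], 3), detection["box"])
```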
Axcel/DialoGPT-small-rick
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
2023-04-21T14:14:13Z
--- license: openrail library_name: diffusers pipeline_tag: text-to-image ---
Axon/resnet18-v1
[ "dataset:ImageNet", "arxiv:1512.03385", "Axon", "Elixir", "license:apache-2.0" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-21T14:16:34Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: distilbert-base-uncased-finetuned-cola results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue config: cola split: validation args: cola metrics: - name: Matthews Correlation type: matthews_correlation value: 0.5366931756163555 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-cola This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.8479 - Matthews Correlation: 0.5367 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.5241 | 1.0 | 535 | 0.5326 | 0.4215 | | 0.3476 | 2.0 | 1070 | 0.5161 | 0.4762 | | 0.2379 | 3.0 | 1605 | 0.5795 | 0.5341 | | 0.1735 | 4.0 | 2140 | 0.7868 | 0.5203 | | 0.1232 | 5.0 | 2675 | 0.8479 | 0.5367 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
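The headline metric here is the Matthews correlation coefficient, which handles the class imbalance of CoLA better than plain accuracy. For reference, it can be computed from predictions with `scikit-learn`; the labels below are toy values for illustration only:

```python
from sklearn.metrics import matthews_corrcoef

# Toy labels (1 = acceptable, 0 = unacceptable), for illustration only.
y_true = [1, 1, 0, 1, 0, 0, 1, 0]
y_pred = [1, 0, 0, 1, 0, 1, 1, 0]

# 1.0 is perfect agreement, 0.0 is chance level; these toy values give 0.5.
print(matthews_corrcoef(y_true, y_pred))
```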
Aybars/ModelOnTquad
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-04-21T14:26:09Z
--- library_name: stable-baselines3 tags: - PandaReachDense-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: PandaReachDense-v2 type: PandaReachDense-v2 metrics: - type: mean_reward value: -2.38 +/- 0.87 name: mean_reward verified: false --- # **A2C** Agent playing **PandaReachDense-v2** This is a trained model of an **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the repo id and checkpoint filename are placeholders to fill in for this model:

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Placeholder repo id and filename; substitute the actual Hub location of this checkpoint.
checkpoint = load_from_hub(repo_id="<user>/<repo>", filename="<model>.zip")
model = A2C.load(checkpoint)
```
Aybars/ModelOnWhole
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-04-21T14:30:21Z
--- license: mit tags: - generated_from_trainer datasets: - imagefolder model-index: - name: donut-commoncrawl results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # donut-commoncrawl This model is a fine-tuned version of [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0 - Datasets 2.11.0 - Tokenizers 0.13.2
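The card lists only the training hyperparameters. A minimal loading sketch for a Donut-style checkpoint; the repo id is a placeholder, and the task prompt token depends on what the model was tuned for:

```python
import torch
from PIL import Image
from transformers import DonutProcessor, VisionEncoderDecoderModel

# Placeholder repo id; substitute the actual Hub location of this checkpoint.
processor = DonutProcessor.from_pretrained("<user>/donut-commoncrawl")
model = VisionEncoderDecoderModel.from_pretrained("<user>/donut-commoncrawl")

image = Image.open("page.png").convert("RGB")
pixel_values = processor(image, return_tensors="pt").pixel_values

# Donut is prompted with a task-specific start token; "<s>" is a generic stand-in here.
decoder_input_ids = processor.tokenizer("<s>", add_special_tokens=False, return_tensors="pt").input_ids

with torch.no_grad():
    outputs = model.generate(pixel_values, decoder_input_ids=decoder_input_ids, max_length=512)

print(processor.batch_decode(outputs, skip_special_tokens=True)[0])
```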
Ayham/albert_distilgpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
2023-04-21T14:37:27Z
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1532405179009716226/CgIPmeYl_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Will Knight</div> <div style="text-align: center; font-size: 14px;">@willknight</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Will Knight. | Data | Will Knight | | --- | --- | | Tweets downloaded | 3226 | | Retweets | 704 | | Short tweets | 261 | | Tweets kept | 2261 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/n84xe0hu/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @willknight's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/nyp5c9d2) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/nyp5c9d2/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/willknight') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Ayham/albert_gpt2_summarization_cnndm
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2023-04-21T14:38:09Z
Tevatron reranker training command (assuming the script is launched with `python`; the original card gives only the bare invocation):

```
python examples/reranker/reranker_train.py \
  --output_dir reranker_xlmr.bs-32.epoch-1.mmarco \
  --model_name_or_path xlm-roberta-large \
  --save_steps 20000 \
  --dataset_name crystina-z/mmarco-train:all \
  --fp16 \
  --per_device_train_batch_size 4 \
  --gradient_accumulation_steps 8 \
  --train_n_passages 8 \
  --learning_rate 5e-6 \
  --q_max_len 16 \
  --p_max_len 128 \
  --num_train_epochs 1 \
  --logging_steps 500 \
  --dataloader_num_workers 4 \
  --overwrite_output_dir
```
Ayham/albert_gpt2_summarization_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2023-04-21T14:38:20Z
--- datasets: - csebuetnlp/xlsum language: - am - ar - az - bn - my - zh - en - fr - gu - ha - hi - ig - id - ja - rn - ko - ky - mr - ne - om - ps - fa - pcm - pt - pa - ru - gd - sr - si - so - es - sw - ta - te - th - ti - tr - uk - ur - uz - vi - cy - yo multilinguality: - multilingual pipeline_tag: summarization --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> This model is a fine-tuned version of [DeltaLM-base](https://huggingface.co/nguyenvulebinh/deltalm-base) on the [XLSum dataset](https://huggingface.co/datasets/csebuetnlp/xlsum), aiming for abstractive multilingual summarization. It achieves the following results on the evaluation set: - rouge-1: 18.2 - rouge-2: 7.6 - rouge-l: 14.9 - rouge-lsum: 14.7 ## Dataset description [XLSum dataset](https://huggingface.co/datasets/csebuetnlp/xlsum) is a comprehensive and diverse dataset comprising 1.35 million professionally annotated article-summary pairs from BBC, extracted using a set of carefully designed heuristics. The dataset covers 45 languages ranging from low to high-resource, for many of which no public dataset is currently available. XL-Sum is highly abstractive, concise, and of high quality, as indicated by human and intrinsic evaluation. ## Languages - amharic - arabic - azerbaijani - bengali - burmese - chinese_simplified - chinese_traditional - english - french - gujarati - hausa - hindi - igbo - indonesian - japanese - kirundi - korean - kyrgyz - marathi - nepali - oromo - pashto - persian - pidgin - portuguese - punjabi - russian - scottish_gaelic - serbian_cyrillic - serbian_latin - sinhala - somali - spanish - swahili - tamil - telugu - thai - tigrinya - turkish - ukrainian - urdu - uzbek - vietnamese - welsh - yoruba ## Training hyperparameters The model was trained on a p4d.24xlarge instance on AWS SageMaker with the following config: - model: deltalm base - batch size: 8 - learning rate: 1e-5 - number of epochs: 3 - warmup steps: 500 - weight decay: 0.01 ## Inference example

```
from modeling_deltalm import DeltalmForConditionalGeneration  # download from https://huggingface.co/hhhhzy/deltalm-base-xlsum/blob/main/modeling_deltalm.py
from configuration_deltalm import DeltalmConfig  # download from https://huggingface.co/hhhhzy/deltalm-base-xlsum/blob/main/configuration_deltalm.py
from transformers import AutoTokenizer

model = DeltalmForConditionalGeneration.from_pretrained("hhhhzy/deltalm-base-xlsum")
tokenizer = AutoTokenizer.from_pretrained("hhhhzy/deltalm-base-xlsum")

text = "The USA’s biggest sports league, the NFL, has extended its partnership with Amazon Prime, granting the streaming platform an additional live game on ‘black Friday’, the day after Thanksgiving. The additional game, added from 2023, builds on Amazon Prime’s package of ‘Thursday night football’ live rights (secured in an 11-year deal).\\nOn the surface, the deal makes sense because it gives Amazon Prime additional game time during the holiday season. But there is a deeper motivation at play. Black Friday is also regarded as the starting point of the pre-Christmas shopping season. Amazon has worked hard to leverage its sports rights in a way that benefits its ecommerce platform, so the addition of this fixture will boost that strategic goal.\\nIt’s unusual for sports rights holders to utilise their inventory in such a granular way – but it does suggest a shift towards a more data-driven approach to negotiations. For NFL, the deal means it now has partnerships with NBC, CBS, Fox and Amazon across the Thanksgiving period. Amazon Prime is currently in the NFL’s good books, helping revitalise the Thursday night slot through its marketing support and onscreen investment. Around 10 million people in the US are watching live fixtures each week."

inputs = tokenizer(text, max_length=512, return_tensors="pt")
generate_ids = model.generate(inputs["input_ids"], min_length=32, max_length=128)
tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
```
Ayham/bert_gpt2_summarization_cnndm_new
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-04-21T14:45:41Z
--- language: en license: apache-2.0 library_name: pytorch tags: - deep-reinforcement-learning - reinforcement-learning - DI-engine - Walker2d-v3 benchmark_name: OpenAI/Gym/MuJoCo task_name: Walker2d-v3 pipeline_tag: reinforcement-learning model-index: - name: TD3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: OpenAI/Gym/MuJoCo-Walker2d-v3 type: OpenAI/Gym/MuJoCo-Walker2d-v3 metrics: - type: mean_reward value: 4323.51 +/- 14.71 name: mean_reward --- # Play **Walker2d-v3** with **TD3** Policy ## Model Description <!-- Provide a longer summary of what this model is. --> This is a simple **TD3** implementation for OpenAI/Gym/MuJoCo **Walker2d-v3** using the [DI-engine library](https://github.com/opendilab/di-engine) and the [DI-zoo](https://github.com/opendilab/DI-engine/tree/main/dizoo). **DI-engine** is a Python library for solving general decision intelligence problems, based on implementations of reinforcement learning frameworks using PyTorch or JAX. This library aims to standardize the reinforcement learning framework across different algorithms, benchmarks, and environments, and to support both academic research and prototype applications. Besides, self-customized training pipelines and applications are supported by reusing different abstraction levels of the DI-engine reinforcement learning framework. ## Model Usage ### Install the Dependencies <details close> <summary>(Click for Details)</summary> ```shell # install huggingface_ding git clone https://github.com/opendilab/huggingface_ding.git pip3 install -e ./huggingface_ding/ # install environment dependencies if needed sudo apt update -y && sudo apt install -y build-essential libgl1-mesa-dev libgl1-mesa-glx libglew-dev libosmesa6-dev libglfw3 libglfw3-dev libsdl2-dev libsdl2-image-dev libglm-dev libfreetype6-dev patchelf mkdir -p ~/.mujoco wget https://mujoco.org/download/mujoco210-linux-x86_64.tar.gz -O mujoco.tar.gz tar -xf mujoco.tar.gz -C ~/.mujoco echo "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:~/.mujoco/mjpro210/bin:~/.mujoco/mujoco210/bin" >> ~/.bashrc export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:~/.mujoco/mjpro210/bin:~/.mujoco/mujoco210/bin pip3 install DI-engine[common_env] ``` </details> ### Git Clone from Huggingface and Run the Model <details close> <summary>(Click for Details)</summary> ```shell # running with trained model python3 -u run.py ``` **run.py** ```python from ding.bonus import TD3Agent from ding.config import Config from easydict import EasyDict import torch # Pull model from files which are git cloned from huggingface policy_state_dict = torch.load("pytorch_model.bin", map_location=torch.device("cpu")) cfg = EasyDict(Config.file_to_dict("policy_config.py")) # Instantiate the agent (TD3Agent, matching the TD3 policy this card describes) agent = TD3Agent(env="Walker2d", exp_name="Walker2d-v3-TD3", cfg=cfg.exp_config, policy_state_dict=policy_state_dict) # Continue training agent.train(step=5000) # Render the new agent performance agent.deploy(enable_save_replay=True) ``` </details> ### Run Model by Using Huggingface_ding <details close> <summary>(Click for Details)</summary> ```shell # running with trained model python3 -u run.py ``` **run.py** ```python from ding.bonus import TD3Agent from huggingface_ding import pull_model_from_hub # Pull model from Huggingface hub policy_state_dict, cfg = pull_model_from_hub(repo_id="OpenDILabCommunity/Walker2d-v3-TD3") # Instantiate the agent agent = TD3Agent(env="Walker2d", exp_name="Walker2d-v3-TD3", cfg=cfg.exp_config, policy_state_dict=policy_state_dict) # Continue training
agent.train(step=5000) # Render the new agent performance agent.deploy(enable_save_replay=True) ``` </details> ## Model Training ### Train the Model and Push to Huggingface_hub <details close> <summary>(Click for Details)</summary> ```shell #Training Your Own Agent python3 -u train.py ``` **train.py** ```python from ding.bonus import TD3Agent from huggingface_ding import push_model_to_hub # Instantiate the agent agent = TD3Agent(env="Walker2d", exp_name="Walker2d-v3-TD3") # Train the agent return_ = agent.train(step=int(5000000)) # Push model to huggingface hub push_model_to_hub( agent=agent.best, env_name="OpenAI/Gym/MuJoCo", task_name="Walker2d-v3", algo_name="TD3", wandb_url=return_.wandb_url, github_repo_url="https://github.com/opendilab/DI-engine", github_doc_model_url="https://di-engine-docs.readthedocs.io/en/latest/12_policies/td3.html", github_doc_env_url="https://di-engine-docs.readthedocs.io/en/latest/13_envs/mujoco.html", installation_guide=''' sudo apt update -y \ && sudo apt install -y \ build-essential \ libgl1-mesa-dev \ libgl1-mesa-glx \ libglew-dev \ libosmesa6-dev \ libglfw3 \ libglfw3-dev \ libsdl2-dev \ libsdl2-image-dev \ libglm-dev \ libfreetype6-dev \ patchelf mkdir -p ~/.mujoco wget https://mujoco.org/download/mujoco210-linux-x86_64.tar.gz -O mujoco.tar.gz tar -xf mujoco.tar.gz -C ~/.mujoco echo "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:~/.mujoco/mjpro210/bin:~/.mujoco/mujoco210/bin" >> ~/.bashrc export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:~/.mujoco/mjpro210/bin:~/.mujoco/mujoco210/bin pip3 install DI-engine[common_env] ''', usage_file_by_git_clone="./td3/walker2d_td3_deploy.py", usage_file_by_huggingface_ding="./td3/walker2d_td3_download.py", train_file="./td3/walker2d_td3.py", repo_id="OpenDILabCommunity/Walker2d-v3-TD3" ) ``` </details> **Configuration** <details close> <summary>(Click for Details)</summary> ```python exp_config = { 'env': { 'manager': { 'episode_num': float("inf"), 'max_retry': 1, 'retry_type': 'reset', 'auto_reset': True, 'step_timeout': None, 'reset_timeout': None, 'retry_waiting_time': 0.1, 'cfg_type': 'BaseEnvManagerDict' }, 'stop_value': 6000, 'env_id': 'Walker2d-v3', 'norm_obs': { 'use_norm': False }, 'norm_reward': { 'use_norm': False }, 'collector_env_num': 1, 'evaluator_env_num': 8, 'n_evaluator_episode': 8 }, 'policy': { 'model': { 'twin_critic': True, 'obs_shape': 17, 'action_shape': 6, 'actor_head_hidden_size': 256, 'critic_head_hidden_size': 256, 'action_space': 'regression' }, 'learn': { 'learner': { 'train_iterations': 1000000000, 'dataloader': { 'num_workers': 0 }, 'log_policy': True, 'hook': { 'load_ckpt_before_run': '', 'log_show_after_iter': 100, 'save_ckpt_after_iter': 10000, 'save_ckpt_after_run': True }, 'cfg_type': 'BaseLearnerDict' }, 'update_per_collect': 1, 'batch_size': 256, 'learning_rate_actor': 0.001, 'learning_rate_critic': 0.001, 'ignore_done': False, 'target_theta': 0.005, 'discount_factor': 0.99, 'actor_update_freq': 2, 'noise': True, 'noise_sigma': 0.2, 'noise_range': { 'min': -0.5, 'max': 0.5 } }, 'collect': { 'collector': {}, 'unroll_len': 1, 'noise_sigma': 0.1, 'n_sample': 1 }, 'eval': { 'evaluator': { 'eval_freq': 5000, 'render': { 'render_freq': -1, 'mode': 'train_iter' }, 'cfg_type': 'InteractionSerialEvaluatorDict', 'n_episode': 8, 'stop_value': 6000 } }, 'other': { 'replay_buffer': { 'replay_buffer_size': 1000000 } }, 'on_policy': False, 'cuda': True, 'multi_gpu': False, 'bp_update_sync': True, 'traj_len_inf': False, 'type': 'td3', 'priority': False, 'priority_IS_weight': False, 'random_collect_size': 25000, 
'transition_with_policy_data': False, 'action_space': 'continuous', 'reward_batch_norm': False, 'multi_agent': False, 'cfg_type': 'TD3PolicyDict' }, 'exp_name': 'Walker2d-v3-TD3', 'seed': 0, 'wandb_logger': { 'gradient_logger': True, 'video_logger': True, 'plot_logger': True, 'action_logger': True, 'return_logger': False } } ``` </details> **Training Procedure** <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> - **Weights & Biases (wandb):** [monitor link](https://wandb.ai/zhangpaipai/Walker2d-v3-TD3) ## Model Information <!-- Provide the basic links for the model. --> - **Github Repository:** [repo link](https://github.com/opendilab/DI-engine) - **Doc**: [DI-engine-docs Algorithm link](https://di-engine-docs.readthedocs.io/en/latest/12_policies/td3.html) - **Configuration:** [config link](https://huggingface.co/OpenDILabCommunity/Walker2d-v3-TD3/blob/main/policy_config.py) - **Demo:** [video](https://huggingface.co/OpenDILabCommunity/Walker2d-v3-TD3/blob/main/replay.mp4) <!-- Provide the size information for the model. --> - **Parameters total size:** 845.03 KB - **Last Update Date:** 2023-04-21 ## Environments <!-- Address questions around what environment the model is intended to be trained and deployed at, including the necessary information needed to be provided for future users. --> - **Benchmark:** OpenAI/Gym/MuJoCo - **Task:** Walker2d-v3 - **Gym version:** 0.25.1 - **DI-engine version:** v0.4.7 - **PyTorch version:** 1.7.1 - **Doc**: [DI-engine-docs Environments link](https://di-engine-docs.readthedocs.io/en/latest/13_envs/mujoco.html)
Ayham/distilbert_gpt2_summarization_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-04-21T14:50:00Z
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### dreambooth-fast-new Dreambooth model trained by mohsin-riad > Names of the persons and their corresponding tokens: - Tony -> sks tonydad - Barbara -> sks barbaramom - Michele -> sks michelemain - Anthony -> sks anthonybro - Liza -> sks lizasis - AJ -> sks ajnep - Michael -> sks michaelnep Try a prompt such as:

```
A portrait of two people on the left sks barbaramom and right sks tonydad as a couple, detailed, centered, 8k resolution, extremely detailed, beautiful, establishing shot, artistic, hyperrealistic, beautiful face, octane render, photography
```

--- > Happy inferencing.
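A minimal text-to-image sketch with `diffusers`, assuming the weights are published as a standard Stable Diffusion pipeline; the repo id is a placeholder:

```python
import torch
from diffusers import StableDiffusionPipeline

# Placeholder repo id; substitute the actual Hub location of this DreamBooth model.
pipe = StableDiffusionPipeline.from_pretrained(
    "<user>/dreambooth-fast-new", torch_dtype=torch.float16
).to("cuda")

prompt = "A portrait of sks tonydad, detailed, centered, 8k resolution, octane render, photography"
image = pipe(prompt).images[0]
image.save("portrait.png")
```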
Ayham/ernie_gpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
2023-04-21T14:51:30Z
--- language: - en tags: - openvino --- # dbmdz/bert-base-cased-finetuned-conll03-english This is the [dbmdz/bert-base-cased-finetuned-conll03-english](https://huggingface.co/dbmdz/bert-base-cased-finetuned-conll03-english) model converted to [OpenVINO](https://openvino.ai), for accelerated inference. An example of how to do inference on this model:

```python
from optimum.intel.openvino import OVModelForTokenClassification
from transformers import AutoTokenizer, pipeline

# model_id should be set to either a local directory or a model available on the HuggingFace hub.
model_id = "helenai/dbmdz-bert-base-cased-finetuned-conll03-english-ov"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = OVModelForTokenClassification.from_pretrained(model_id)
pipe = pipeline("token-classification", model=model, tokenizer=tokenizer)
result = pipe("My name is Wolfgang and I live in Berlin")
print(result)
```
Ayham/roberta_gpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
2023-04-21T14:55:32Z
--- tags: - autotrain - summarization language: - unk widget: - text: "I love AutoTrain 🤗" datasets: - transformer3/autotrain-data-finance6 co2_eq_emissions: emissions: 0.03286397835245103 --- # Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 51355121739 - CO2 Emissions (in grams): 0.0329 ## Validation Metrics - Loss: 1.408 - Rouge1: 30.417 - Rouge2: 20.332 - RougeL: 28.167 - RougeLsum: 28.165 - Gen Len: 19.992 ## Usage You can use cURL to access this model:

```
$ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/transformer3/autotrain-finance6-51355121739
```
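Besides cURL, the checkpoint can also be called locally, as the other AutoTrain cards in this dump do. A sketch assuming the model is a standard seq2seq checkpoint under the repo id from the cURL command, with `use_auth_token` for a private repo:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model = AutoModelForSeq2SeqLM.from_pretrained(
    "transformer3/autotrain-finance6-51355121739", use_auth_token=True
)
tokenizer = AutoTokenizer.from_pretrained(
    "transformer3/autotrain-finance6-51355121739", use_auth_token=True
)

inputs = tokenizer("I love AutoTrain", return_tensors="pt")
summary_ids = model.generate(**inputs, max_length=20)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```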
Ayham/robertagpt2_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-04-21T15:01:35Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - resumes_t2json model-index: - name: flan-t5-base-finetuned-xsum results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # flan-t5-base-finetuned-xsum This model is a fine-tuned version of [google/flan-t5-base](https://huggingface.co/google/flan-t5-base) on the resumes_t2json dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu117 - Datasets 2.9.0 - Tokenizers 0.13.2
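No usage example is given. A minimal seq2seq inference sketch for this FLAN-T5 fine-tune; the repo id and the input resume text are placeholders:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Placeholder repo id; substitute the actual Hub location of this checkpoint.
model_id = "<user>/flan-t5-base-finetuned-xsum"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Placeholder input; the model was tuned on the resumes_t2json dataset.
inputs = tokenizer("John Doe. Software engineer with 5 years of Python experience.", return_tensors="pt")
outputs = model.generate(**inputs, max_length=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```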
Ayham/xlnet_gpt_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1012181647993606144/TeYvs7NH_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Judea Pearl</div> <div style="text-align: center; font-size: 14px;">@yudapearl</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Judea Pearl. | Data | Judea Pearl | | --- | --- | | Tweets downloaded | 3250 | | Retweets | 222 | | Short tweets | 22 | | Tweets kept | 3006 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/dybobsva/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @yudapearl's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/idouxson) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/idouxson/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/yudapearl') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Ayoola/wav2vec2-large-xlsr-turkish-demo-colab
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
Access to model pusa88/nelu is restricted and you are not in the authorized list. Visit https://huggingface.co/pusa88/nelu to ask for access.
Ayta/Haha
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-21T15:36:59Z
--- tags: - autotrain - text-classification language: - en widget: - text: "I love AutoTrain 🤗" datasets: - claudio-cyberg0n/autotrain-data-cve-sa-numeriarrotondati co2_eq_emissions: emissions: 1.189216119158559 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 51372121776 - CO2 Emissions (in grams): 1.1892 ## Validation Metrics - Loss: 1.324 - Accuracy: 0.570 - Macro F1: 0.474 - Micro F1: 0.570 - Weighted F1: 0.561 - Macro Precision: 0.506 - Micro Precision: 0.570 - Weighted Precision: 0.564 - Macro Recall: 0.462 - Micro Recall: 0.570 - Weighted Recall: 0.570 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/claudio-cyberg0n/autotrain-cve-sa-numeriarrotondati-51372121776 ``` Or Python API: ``` from transformers import AutoModelForSequenceClassification, AutoTokenizer model = AutoModelForSequenceClassification.from_pretrained("claudio-cyberg0n/autotrain-cve-sa-numeriarrotondati-51372121776", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("claudio-cyberg0n/autotrain-cve-sa-numeriarrotondati-51372121776", use_auth_token=True) inputs = tokenizer("I love AutoTrain", return_tensors="pt") outputs = model(**inputs) ```
AyushPJ/ai-club-inductions-21-nlp-distilBERT
[ "pytorch", "distilbert", "question-answering", "transformers", "generated_from_trainer", "autotrain_compatible" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-04-21T15:52:17Z
--- license: creativeml-openrail-m --- Civitai link: https://civitai.com/models/47022 Not going to write comments here for the time being.
BSC-LT/roberta-base-biomedical-es
[ "pytorch", "roberta", "fill-mask", "es", "arxiv:2109.03570", "arxiv:2109.07765", "transformers", "biomedical", "spanish", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
161
2023-04-21T16:40:46Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 263.01 +/- 16.60 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the repo id and checkpoint filename are placeholders to fill in for this model:

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholder repo id and filename; substitute the actual Hub location of this checkpoint.
checkpoint = load_from_hub(repo_id="<user>/<repo>", filename="<model>.zip")
model = PPO.load(checkpoint)
```
BSC-LT/roberta-base-bne-capitel-ner
[ "pytorch", "roberta", "token-classification", "es", "dataset:bne", "dataset:capitel", "arxiv:1907.11692", "arxiv:2107.07253", "transformers", "national library of spain", "spanish", "bne", "capitel", "ner", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
2023-04-21T16:42:01Z
--- language: - en tags: - openvino --- # anirudh21/albert-large-v2-finetuned-sst2 This is the [anirudh21/albert-large-v2-finetuned-sst2](https://huggingface.co/anirudh21/albert-large-v2-finetuned-sst2) model converted to [OpenVINO](https://openvino.ai), for accelerated inference. An example of how to do inference on this model:

```python
from optimum.intel.openvino import OVModelForSequenceClassification
from transformers import AutoTokenizer, pipeline

# model_id should be set to either a local directory or a model available on the HuggingFace hub.
model_id = "helenai/anirudh21-albert-large-v2-finetuned-sst2-ov"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = OVModelForSequenceClassification.from_pretrained(model_id)
pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
result = pipe("I like you. I love you")
print(result)
```
BSC-LT/roberta-large-bne-capitel-ner
[ "pytorch", "roberta", "token-classification", "es", "dataset:bne", "dataset:capitel", "arxiv:1907.11692", "arxiv:2107.07253", "transformers", "national library of spain", "spanish", "bne", "capitel", "ner", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- language: - en tags: - openvino --- # Palak/albert-large-v2_squad This is the [Palak/albert-large-v2_squad](https://huggingface.co/Palak/albert-large-v2_squad) model converted to [OpenVINO](https://openvino.ai), for accelerated inference. An example of how to do inference on this model:

```python
from optimum.intel.openvino import OVModelForQuestionAnswering
from transformers import AutoTokenizer, pipeline

# model_id should be set to either a local directory or a model available on the HuggingFace hub.
model_id = "helenai/Palak-albert-large-v2_squad-ov"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = OVModelForQuestionAnswering.from_pretrained(model_id)
pipe = pipeline("question-answering", model=model, tokenizer=tokenizer)
result = pipe("What is OpenVINO?", "OpenVINO is a framework that accelerates deep learning inferencing")
print(result)
```
Babysittingyoda/DialoGPT-small-familyguy
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
2023-04-21T17:16:47Z
## Usage The model can be used directly (without a language model) as follows:

```python
import argparse

import soundfile as sf
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor


def parse_transcription(wav_file):
    # load pretrained model
    processor = Wav2Vec2Processor.from_pretrained("addy88/wav2vec2-kannada-stt")
    model = Wav2Vec2ForCTC.from_pretrained("addy88/wav2vec2-kannada-stt")

    # load audio
    audio_input, sample_rate = sf.read(wav_file)

    # pad input values and return pt tensor
    input_values = processor(audio_input, sampling_rate=sample_rate, return_tensors="pt").input_values

    # INFERENCE: retrieve logits & take argmax
    logits = model(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)

    # transcribe
    transcription = processor.decode(predicted_ids[0], skip_special_tokens=True)
    print(transcription)


if __name__ == "__main__":
    # wire up the previously unused argparse import so the script is runnable
    parser = argparse.ArgumentParser()
    parser.add_argument("wav_file", help="path to a .wav file to transcribe")
    args = parser.parse_args()
    parse_transcription(args.wav_file)
```
Banshee/dialoGPT-small-luke
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-21T17:41:48Z
<h1 align="center">Welcome to wechat-chatgpt 👋</h1> <p> <img alt="Version" src="https://img.shields.io/badge/version-1.0.0-blue.svg?cacheSeconds=2592000" /> <a href="#" target="_blank"> <img alt="License: ISC" src="https://img.shields.io/badge/License-ISC-yellow.svg" /> </a> <a href="https://twitter.com/fuergaosi" target="_blank"> <img alt="Twitter: fuergaosi" src="https://img.shields.io/twitter/follow/fuergaosi.svg?style=social" /> </a> <a href="https://discord.gg/8fXNrxwUJH" target="blank"> <img src="https://img.shields.io/discord/1058994816446369832?label=Join%20Community&logo=discord&style=flat-square" alt="join discord community of github profile readme generator"/> </a> </p> > Use ChatGPT on WeChat via wechaty > English | [中文文档](README_ZH.md) [![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/dMLG70?referralCode=bIYugQ) ## 🌟 Features - Interact with WeChat and ChatGPT: - Use ChatGPT on WeChat with [wechaty](https://github.com/wechaty/wechaty) and the [Official API](https://openai.com/blog/introducing-chatgpt-and-whisper-apis) - Add conversation support - Support command setting - Deployment and configuration options: - Add Dockerfile, deployable with [docker](#use-with-docker) - Support deployment using [docker compose](#use-with-docker-compose) - Support [Railway](#use-with-railway) and [Fly.io](#use-with-flyio) deployment - Other features: - Support [Dall·E](https://labs.openai.com/) - Support [whisper](https://openai.com/blog/introducing-chatgpt-and-whisper-apis) - Support setting prompt - Support proxy (in development) ## 🚀 Usage - [Use with Railway](#use-with-railway) (PaaS, Free, Stable, ✅Recommended) - [Use with Fly.io](#use-with-flyio) (PaaS, Free, ✅Recommended) - [Use with docker](#use-with-docker) (Self-hosted, Stable, ✅Recommended) - [Use with docker compose](#use-with-docker-compose) (Self-hosted, Stable, ✅Recommended) - [Use with nodejs](#use-with-nodejs) (Self-hosted) ## Use with Railway > Railway offers $5 or 500 hours of runtime per month 1. Click the [Railway](https://railway.app/template/dMLG70?referralCode=bIYugQ) button to go to the Railway deployment page 2. Click the `Deploy Now` button to enter the Railway deployment page 3. Fill in the repository name and `OPENAI_API_KEY` (need to link GitHub account) 4. Click the `Deploy` button 5. Click the `View Logs` button and wait for the deployment to complete ## Use with Fly.io > Please allocate 512MB memory for the application to meet the application requirements > fly.io offers free bills up to $5 (the free allowance of three 256MB instances is not included in the bill) 1. Install [flyctl](https://fly.io/docs/getting-started/installing-flyctl/)

```shell
# macOS
brew install flyctl
# Windows
scoop install flyctl
# Linux
curl https://fly.io/install.sh | sh
```

2. Clone the project and enter the project directory

```shell
git clone https://github.com/fuergaosi233/wechat-chatgpt.git && cd wechat-chatgpt
```

3. Create a new app

```shell
➜ flyctl launch
? Would you like to copy its configuration to the new app? No
? App Name (leave blank to use an auto-generated name): <YOUR APP NAME>
? Select region: <YOUR CHOSEN REGION>
? Would you like to setup a Postgresql database now? No
? Would you like to deploy now? No
```

4. Configure the environment variables

```shell
flyctl secrets set OPENAI_API_KEY="<YOUR OPENAI API KEY>" MODEL="<CHATGPT-MODEL>"
```

5. Deploy the app
Deploy the app ```shell flyctl deploy ``` ## Use with docker ```sh # pull image docker pull holegots/wechat-chatgpt # run container docker run -d --name wechat-chatgpt \ -e OPENAI_API_KEY=<YOUR OPENAI API KEY> \ -e MODEL="gpt-3.5-turbo" \ -e CHAT_PRIVATE_TRIGGER_KEYWORD="" \ -v $(pwd)/data:/app/data/wechat-assistant.memory-card.json \ holegots/wechat-chatgpt:latest # View the QR code to log in to wechat docker logs -f wechat-chatgpt ``` > How to get OPENAI API KEY? [Click here](https://platform.openai.com/account/api-keys) ## Use with docker compose ```sh # Copy the configuration file according to the template cp .env.example .env # Edit the configuration file vim .env # Start the container docker-compose up -d # View the QR code to log in to wechat docker logs -f wechat-chatgpt ``` ## Use with nodejs > You need NodeJS 18.0.0 version and above ```sh # Clone the project git clone https://github.com/fuergaosi233/wechat-chatgpt.git && cd wechat-chatgpt # Install dependencies npm install # Copy the configuration file according to the template cp .env.example .env # Edit the configuration file vim .env # Start project npm run dev ``` > Please make sure your WeChat account can log in [WeChat on web](https://wx.qq.com/) ## 📝 Environment Variables | name | default | example | description | |------------------------------|------------------------|------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | ~~API~~ | https://api.openai.com | | ~~API endpoint of ChatGPT~~ | | OPENAI_API_KEY | 123456789 | sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX | [create new secret key](https://platform.openai.com/account/api-keys) | | MODEL | gpt-3.5-turbo | | ID of the model to use. Currently, only gpt-3.5-turbo and gpt-3.5-turbo-0301 are supported. | | TEMPERATURE | 0.6 | | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. | | CHAT_TRIGGER_RULE | | | Private chat triggering rules. | | DISABLE_GROUP_MESSAGE | true | | Prohibited to use ChatGPT in group chat. 
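For the docker compose and Node.js setups, these variables live in `.env`. A minimal sketch of such a file, using only the common settings (every value below is a placeholder, not a working credential):

```sh
# .env - illustrative only; replace every value with your own
OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXXXXXX
MODEL=gpt-3.5-turbo
TEMPERATURE=0.6
CHAT_PRIVATE_TRIGGER_KEYWORD=
DISABLE_GROUP_MESSAGE=true
BLOCK_WORDS=VPN
CHATGPT_BLOCK_WORDS=VPN
```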
## 📝 Using a Custom ChatGPT API

> https://github.com/fuergaosi233/openai-proxy

```shell
# Clone the project
git clone https://github.com/fuergaosi233/openai-proxy
# Install dependencies
npm install && npm install -g wrangler && npm run build
# Deploy to CloudFlare Workers
npm run deploy
# Custom domain (optional): add a `Route` to `wrangler.toml`
#   routes = [
#     { pattern = "Your Custom Domain", custom_domain = true },
#   ]
```

## ⌨️ Commands

> Enter these in the WeChat chat box.

```shell
/cmd help # Show help
/cmd prompt <PROMPT> # Set the prompt
/cmd clear # Clear all sessions since the last boot
```

## ✨ Contributors

<a href="https://github.com/fuergaosi233/wechat-chatgpt/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=fuergaosi233/wechat-chatgpt" />
</a>

## 🤝 Contributing

Contributions, issues and feature requests are welcome!<br />Feel free to check the [issues page](https://github.com/fuergaosi233/wechat-chatgpt/issues).

## Show your support

Give a ⭐️ if this project helped you!
BaptisteDoyen/camembert-base-xnli
[ "pytorch", "tf", "camembert", "text-classification", "fr", "dataset:xnli", "transformers", "zero-shot-classification", "xnli", "nli", "license:mit", "has_space" ]
zero-shot-classification
{ "architectures": [ "CamembertForSequenceClassification" ], "model_type": "camembert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
405,474
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1605353044124012544/9dGQd4_Q_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Machel Reid</div> <div style="text-align: center; font-size: 14px;">@machelreid</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Machel Reid. | Data | Machel Reid | | --- | --- | | Tweets downloaded | 1206 | | Retweets | 663 | | Short tweets | 126 | | Tweets kept | 417 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/imsyeurr/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @machelreid's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/04ttrbuc) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/04ttrbuc/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/machelreid') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Barkavi/totto-t5-base-bert-score-121K
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
51
null
Access to model kavindu999/BetterEnglishGPT-v1 is restricted and you are not in the authorized list. Visit https://huggingface.co/kavindu999/BetterEnglishGPT-v1 to ask for access.
Barleysack/AERoberta
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2023-04-21T17:53:26Z
---
license: apache-2.0
datasets:
- EleutherAI/the_pile
language:
- en
library_name: transformers
---
Barleysack/AERoberta2
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
---
tags:
- CartPole-v1
- ppo
- deep-reinforcement-learning
- reinforcement-learning
- custom-implementation
- deep-rl-course
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: CartPole-v1
      type: CartPole-v1
    metrics:
    - type: mean_reward
      value: 199.10 +/- 111.79
      name: mean_reward
      verified: false
---

# PPO Agent Playing CartPole-v1

This is a trained model of a PPO agent playing CartPole-v1.

# Hyperparameters

```python
{'exp_name': 'ppo.py',
 'seed': 1,
 'torch_deterministic': True,
 'cuda': True,
 'track': False,
 'wandb_project_name': 'cleanRL',
 'wandb_entity': None,
 'capture_video': False,
 'env_id': 'CartPole-v1',
 'total_timesteps': 50000,
 'learning_rate': 0.00025,
 'num_envs': 4,
 'num_steps': 128,
 'anneal_lr': True,
 'gae': True,
 'gamma': 0.99,
 'gae_lambda': 0.95,
 'num_minibatches': 4,
 'update_epochs': 4,
 'norm_adv': True,
 'clip_coef': 0.2,
 'clip_vloss': True,
 'ent_coef': 0.01,
 'vf_coef': 0.5,
 'max_grad_norm': 0.5,
 'target_kl': None,
 'repo_id': 'Emperor/ppo-CartPole-v1',
 'f': '/root/.local/share/jupyter/runtime/kernel-1e988852-e898-4994-92b6-d91ce76fc467.json',
 'batch_size': 512,
 'minibatch_size': 128}
```
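To pull the trained checkpoint locally for inspection, one minimal sketch uses `huggingface_hub`. The `repo_id` comes from the hyperparameters above; the filename follows CleanRL's usual naming convention and is an assumption, so check the repo's file list:

```python
from huggingface_hub import hf_hub_download

# Downloads a single file from the Hub and returns its local path
checkpoint_path = hf_hub_download(
    repo_id="Emperor/ppo-CartPole-v1",
    filename="ppo.cleanrl_model",  # assumed filename
)
print(checkpoint_path)
```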
Batsy24/DialoGPT-medium-Twilight_BellaBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-04-21T17:58:19Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: finetuning-sentiment-model-Q12023TEA results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-Q12023TEA This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3990 - Accuracy: 0.86 - F1: 0.8986 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.26.0 - Pytorch 1.13.1+cpu - Datasets 2.11.0 - Tokenizers 0.13.2
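For reference, a sketch of how the hyperparameters listed above map onto `transformers.TrainingArguments` (the `output_dir` and any setting not listed above are assumptions):

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="finetuning-sentiment-model-Q12023TEA",  # assumed output directory
    learning_rate=2e-4,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=2,
)
```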
BatuhanYilmaz/marian-finetuned-kde4-en-to-fr
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-21T18:25:49Z
# godot_dodo_4x_60k_llama_13b

## Model details

Trained in April 2023. Godot-Dodo models are instruction-following models fine-tuned from LLaMA models. Please refer to the README of the [GitHub repository](https://github.com/minosvasilias/godot-dodo) for detailed information.

### Evaluation datasets

The model was evaluated using code instruction prompts. More details in the [GitHub repository](https://github.com/minosvasilias/godot-dodo).

### Training dataset

The model was trained on a 60k-row instruction-following dataset, which is released in the [GitHub repository](https://github.com/minosvasilias/godot-dodo).
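As a rough illustration of how such a checkpoint could be queried with `transformers`. The repo id and prompt format below are assumptions; consult the GitHub repository above for the exact instruction template used in training:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "minosvasilias/godot_dodo_4x_60k_llama_13b"  # assumed hosting location
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

prompt = "Write a GDScript function that moves a node 10 pixels to the right."
inputs = tokenizer(prompt, return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```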
Baybars/wav2vec2-xls-r-1b-turkish
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "tr", "dataset:common_voice", "transformers", "common_voice", "generated_from_trainer" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
2023-04-21T18:28:23Z
---
tags:
- autotrain
- translation
language:
- fr
- en
datasets:
- ybanas/autotrain-data-fr-en-translate
co2_eq_emissions:
  emissions: 86.90578464498235
---

# French to English Text Translation with Transformers

This code allows you to translate French text into English using the `ybanas/autotrain-fr-en-translate-51410121895` model from the Transformers library. To use this code, follow the steps below:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the tokenizer and the model
tokenizer = AutoTokenizer.from_pretrained("ybanas/autotrain-fr-en-translate-51410121895")
model = AutoModelForSeq2SeqLM.from_pretrained("ybanas/autotrain-fr-en-translate-51410121895")

def translate_text(french_text: str) -> str:
    """
    Translate French text to English using the
    ybanas/autotrain-fr-en-translate-51410121895 model.

    Args:
        french_text (str): French text to translate.

    Returns:
        str: Translated English text.
    """
    # Tokenize the French text
    inputs = tokenizer(french_text, return_tensors="pt", padding=True, truncation=True)

    # Generate the English translation
    outputs = model.generate(**inputs)

    # Decode the English translation
    english_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    return english_text

if __name__ == "__main__":
    french_text = "Les enfants aiment profiter des beaux jours"
    english_text = translate_text(french_text)
    print("French text:", french_text)
    print("Translated English text:", english_text)
```

## Usage

1. Install the Transformers library by running `pip install transformers`.
2. Copy the code above into a `.py` file, for example `translation.py`.
3. Replace the value of the `french_text` variable with the French text you want to translate.
4. Run the script with `python translation.py`. The translated English text will be displayed on the screen.

This script uses the `ybanas/autotrain-fr-en-translate-51410121895` model to translate French text into English. The model is loaded using the `AutoTokenizer` and `AutoModelForSeq2SeqLM` classes from the Transformers library. The `translate_text` function takes a French text as input and returns its translation in English.

# Model Trained Using AutoTrain

- Problem type: Translation
- Model ID: 51410121895
- CO2 Emissions (in grams): 86.9058

## Validation Metrics

- Loss: 1.455
- SacreBLEU: 15.999
- Gen len: 15.299
BeIR/query-gen-msmarco-t5-base-v1
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
1,816
2023-04-21T18:36:32Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: JQED_QA_question_classifer_final results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # JQED_QA_question_classifer_final This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0329 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
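A minimal inference sketch, assuming the fine-tuned question classifier is published under a repo id of its own (the id below is a placeholder, and the label set depends on what the classifier was trained on):

```python
from transformers import pipeline

classifier = pipeline("text-classification", model="<this-repo-id>")  # placeholder id
print(classifier("In which year did the war end?"))
```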
Bella4322/Sarah
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
license: other
language:
- en
- zh
library_name: transformers
---

# 🦙 Llama for Huggingface Transformers

Llama-7B converted from the official [Llama-7B](https://github.com/facebookresearch/Llama/blob/main/MODEL_CARD.md) weights to a Huggingface model via [HF's conversion script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/Llama/convert_Llama_weights_to_hf.py) to work with Transformers/HuggingFace. This is under a special license; please see the LICENSE file for details.

This is updated from [decapoda-research/llama-7b-hf](https://huggingface.co/decapoda-research/Llama-7b-hf) (since many pull requests are not yet merged in decapoda's repo, this new repo was opened directly). It includes:

(1) The naming changes (LLaMA -> Llama) to best fit the `transformers` naming rule, in both `LlamaForCausalLM` and `LlamaTokenizer`. This works perfectly for `transformers>=4.28.0`.

(2) The model checkpoints are saved in 2 shards (instead of 33 shards in [decapoda-research/Llama-7b-hf](https://huggingface.co/decapoda-research/Llama-7b-hf)). Fewer shards accelerate loading from disk.

# Llama Model Card

## Model details

**Organization developing the model** The FAIR team of Meta AI.

**Model date** Llama was trained between December 2022 and February 2023.

**Model version** This is version 1 of the model.

**Model type** Llama is an auto-regressive language model, based on the transformer architecture. The model comes in different sizes: 7B, 13B, 33B and 65B parameters.

**Paper or resources for more information** More information can be found in the paper “Llama: Open and Efficient Foundation Language Models”, available at https://research.facebook.com/publications/Llama-open-and-efficient-foundation-language-models/.

**Citations details** https://research.facebook.com/publications/Llama-open-and-efficient-foundation-language-models/

**License** Non-commercial bespoke license

**Where to send questions or comments about the model** Questions and comments about Llama can be sent via the [GitHub repository](https://github.com/facebookresearch/Llama) of the project, by opening an issue.

## Intended use

**Primary intended uses** The primary use of Llama is research on large language models, including: exploring potential applications such as question answering, natural language understanding or reading comprehension; understanding capabilities and limitations of current language models, and developing techniques to improve those; evaluating and mitigating biases, risks, and toxic and harmful content generation; hallucinations.

**Primary intended users** The primary intended users of the model are researchers in natural language processing, machine learning and artificial intelligence.

**Out-of-scope use cases** Llama is a base, or foundational, model. As such, it should not be used in downstream applications without further risk evaluation and mitigation. In particular, our model has not been trained with human feedback, and can thus generate toxic or offensive content, incorrect information or generally unhelpful answers.

## Factors

**Relevant factors** One of the most relevant factors for which model performance may vary is which language is used. Although we included 20 languages in the training data, most of our dataset is made of English text, and we thus expect the model to perform better for English than for other languages. Relatedly, it has been shown in previous studies that performance might vary for different dialects, and we expect that it will be the case for our model.

**Evaluation factors** As our model is trained on data from the Web, we expect that it reflects biases from this source. We thus evaluated on RAI datasets to measure biases exhibited by the model for gender, religion, race, sexual orientation, age, nationality, disability, physical appearance and socio-economic status. We also measure the toxicity of model generations, depending on the toxicity of the context used to prompt the model.

## Metrics

**Model performance measures** We use the following measures to evaluate the model:
- Accuracy for common sense reasoning, reading comprehension, natural language understanding (MMLU), BIG-bench hard, WinoGender and CrowS-Pairs,
- Exact match for question answering,
- The toxicity score from Perspective API on RealToxicityPrompts.

**Decision thresholds** Not applicable.

**Approaches to uncertainty and variability** Due to the high computational requirements of training LLMs, we trained only one model of each size, and thus could not evaluate variability of pre-training.

## Evaluation datasets

The model was evaluated on the following benchmarks: BoolQ, PIQA, SIQA, HellaSwag, WinoGrande, ARC, OpenBookQA, NaturalQuestions, TriviaQA, RACE, MMLU, BIG-bench hard, GSM8k, RealToxicityPrompts, WinoGender, CrowS-Pairs.

## Training dataset

The model was trained using the following sources of data: CCNet [67%], C4 [15%], GitHub [4.5%], Wikipedia [4.5%], Books [4.5%], ArXiv [2.5%], Stack Exchange [2%]. The Wikipedia and Books domains include data in the following languages: bg, ca, cs, da, de, en, es, fr, hr, hu, it, nl, pl, pt, ro, ru, sl, sr, sv, uk. See the paper for more details about the training set and corresponding preprocessing.

## Quantitative analysis

Hyperparameters for the model architecture:

| Number of parameters | dimension | n heads | n layers | Learning rate | Batch size | n tokens |
|---|---|---|---|---|---|---|
| 7B | 4096 | 32 | 32 | 3.0E-04 | 4M | 1T |
| 13B | 5120 | 40 | 40 | 3.0E-04 | 4M | 1T |
| 33B | 6656 | 52 | 60 | 1.5E-04 | 4M | 1.4T |
| 65B | 8192 | 64 | 80 | 1.5E-04 | 4M | 1.4T |

*Table 1 - Summary of Llama Model Hyperparameters*

We present our results on eight standard common sense reasoning benchmarks in the table below.

| Number of parameters | BoolQ | PIQA | SIQA | HellaSwag | WinoGrande | ARC-e | ARC-c | OBQA | COPA |
|---|---|---|---|---|---|---|---|---|---|
| 7B | 76.5 | 79.8 | 48.9 | 76.1 | 70.1 | 76.7 | 47.6 | 57.2 | 93 |
| 13B | 78.1 | 80.1 | 50.4 | 79.2 | 73 | 78.1 | 52.7 | 56.4 | 94 |
| 33B | 83.1 | 82.3 | 50.4 | 82.8 | 76 | 81.4 | 57.8 | 58.6 | 92 |
| 65B | 85.3 | 82.8 | 52.3 | 84.2 | 77 | 81.5 | 56 | 60.2 | 94 |

*Table 2 - Summary of Llama Model Performance on Reasoning tasks*

We present our results on bias in the table below. Note that a lower value is better, indicating lower bias.

| No | Category | FAIR LLM |
|---|---|---|
| 1 | Gender | 70.6 |
| 2 | Religion | 79 |
| 3 | Race/Color | 57 |
| 4 | Sexual orientation | 81 |
| 5 | Age | 70.1 |
| 6 | Nationality | 64.2 |
| 7 | Disability | 66.7 |
| 8 | Physical appearance | 77.8 |
| 9 | Socioeconomic status | 71.5 |
| | Llama Average | 66.6 |

*Table 3 - Summary bias of our model output*

## Ethical considerations

**Data** The data used to train the model is collected from various sources, mostly from the Web. As such, it contains offensive, harmful and biased content. We thus expect the model to exhibit such biases from the training data.

**Human life** The model is not intended to inform decisions about matters central to human life, and should not be used in such a way.

**Mitigations** We filtered the data from the Web based on its proximity to Wikipedia text and references. For this, we used a Kneser-Ney language model and a fastText linear classifier.

**Risks and harms** Risks and harms of large language models include the generation of harmful, offensive or biased content. These models are often prone to generating incorrect information, sometimes referred to as hallucinations. We do not expect our model to be an exception in this regard.

**Use cases** Llama is a foundational model, and as such, it should not be used for downstream applications without further investigation and mitigation of risks. These risks and potential fraught use cases include, but are not limited to: generation of misinformation and generation of harmful, biased or offensive content.
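## How to use

A minimal loading sketch for this conversion, using the `LlamaForCausalLM`/`LlamaTokenizer` classes mentioned above (requires `transformers>=4.28.0`; `<this-repo-id>` stands in for wherever these converted weights are hosted):

```python
from transformers import LlamaForCausalLM, LlamaTokenizer

tokenizer = LlamaTokenizer.from_pretrained("<this-repo-id>")  # placeholder id
model = LlamaForCausalLM.from_pretrained("<this-repo-id>")

inputs = tokenizer("The capital of France is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```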
BertChristiaens/EmojiPredictor
[ "pytorch", "distilbert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2023-04-21T19:11:53Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: pizza_chain_spell_correction results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pizza_chain_spell_correction This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.2744 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 32 | 2.4588 | | No log | 2.0 | 64 | 2.2744 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
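A quick inference sketch for the resulting spell-correction model, assuming it is published under a repo id of its own (the id below is a placeholder; t5-small fine-tunes are served through the text2text-generation pipeline):

```python
from transformers import pipeline

corrector = pipeline("text2text-generation", model="<this-repo-id>")  # placeholder id
print(corrector("lrage peperoni pizzza with extra chese"))
```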
BhanuSama/gpt2-finetuned-xsum
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-21T19:21:17Z
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gymnasium as gym  # the Deep RL course uses Gymnasium; plain `gym` also works for Taxi-v3

# `load_from_hub` is the helper defined in the Deep RL course notebook;
# it downloads and unpickles the model dict from the Hub.
model = load_from_hub(repo_id="srinivasvl81/Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc.)
env = gym.make(model["env_id"])
```
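A sketch of greedy evaluation with the loaded Q-table, assuming the pickled dict exposes a `qtable` key alongside `env_id` (as the course's saving helper does) and the Gymnasium step API:

```python
import numpy as np

state, info = env.reset()
done = False
while not done:
    action = np.argmax(model["qtable"][state])  # greedy action from the Q-table
    state, reward, terminated, truncated, info = env.step(action)
    done = terminated or truncated
```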
Bharathdamu/wav2vec2-large-xls-r-300m-hindi
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
2023-04-21T19:26:02Z
# Vocabulary Trimmed [vocabtrimmer/xlm-v-base-xnli-de](https://huggingface.co/vocabtrimmer/xlm-v-base-xnli-de): `vocabtrimmer/xlm-v-base-xnli-de-trimmed-de`

This model is a trimmed version of [vocabtrimmer/xlm-v-base-xnli-de](https://huggingface.co/vocabtrimmer/xlm-v-base-xnli-de) created with [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming the vocabulary of language models to compress model size. The following table summarizes the trimming process.

| | vocabtrimmer/xlm-v-base-xnli-de | vocabtrimmer/xlm-v-base-xnli-de-trimmed-de |
|:---|:---|:---|
| parameter_size_full | 778,495,491 | 269,819,139 |
| parameter_size_embedding | 692,451,072 | 183,774,720 |
| vocab_size | 901,629 | 239,290 |
| compression_rate_full | 100.0 | 34.66 |
| compression_rate_embedding | 100.0 | 26.54 |

The following table shows the parameters used to trim the vocabulary.

| language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency |
|:---|:---|:---|:---|:---|:---|---:|
| de | vocabtrimmer/mc4_validation | text | de | validation | | 2 |
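The compression figures in the first table can be sanity-checked by loading both checkpoints and comparing parameter counts. A minimal sketch:

```python
from transformers import AutoModelForSequenceClassification

full = AutoModelForSequenceClassification.from_pretrained("vocabtrimmer/xlm-v-base-xnli-de")
trimmed = AutoModelForSequenceClassification.from_pretrained("vocabtrimmer/xlm-v-base-xnli-de-trimmed-de")
# These counts should match parameter_size_full in the table above
print(full.num_parameters(), trimmed.num_parameters())
```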
Bharathdamu/wav2vec2-large-xls-r-300m-hindi2-colab
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# `vocabtrimmer/xlm-v-base-xnli-ar`

This model is a fine-tuned version of [facebook/xlm-v-base](https://huggingface.co/facebook/xlm-v-base) on the [xnli](https://huggingface.co/datasets/xnli) dataset (ar). The following metrics are computed on the `test` split of [xnli](https://huggingface.co/datasets/xnli) (ar).

| | eval_f1_micro | eval_recall_micro | eval_precision_micro | eval_f1_macro | eval_recall_macro | eval_precision_macro | eval_accuracy |
|---:|---:|---:|---:|---:|---:|---:|---:|
| 0 | 75.51 | 75.51 | 75.51 | 75.4 | 75.51 | 76.4 | 75.51 |

Check the result file [here](https://huggingface.co/vocabtrimmer/xlm-v-base-xnli-ar/raw/main/eval.json).
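A minimal usage sketch: XNLI models score a (premise, hypothesis) pair, which the text-classification pipeline accepts as a dict (the Arabic strings below are placeholders):

```python
from transformers import pipeline

nli = pipeline("text-classification", model="vocabtrimmer/xlm-v-base-xnli-ar")
print(nli({"text": "<premise in Arabic>", "text_pair": "<hypothesis in Arabic>"}))
```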
Bharathdamu/wav2vec2-model-hindi-stt
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
library_name: ml-agents
tags:
- Huggy
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Huggy
---

# **ppo** Agent playing **Huggy**

This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial on training your first agent using ML-Agents and publishing it to the Hub.

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your agent play

You can watch your agent playing directly in your browser:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy
2. Find your model_id: geovanyuribe/ppo-Huggy
3. Select your *.nn or *.onnx file
4. Click on Watch the agent play 👀
Bharathdamu/wav2vec2-model-hindibhasha
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-21T19:30:56Z
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 239.76 +/- 22.53
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**

This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

TODO: Add your code

```python
from stable_baselines3 import ...
from huggingface_sb3 import load_from_hub

...
```
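One plausible way to fill in the TODO above, using `huggingface_sb3`. The `repo_id` and `filename` here are assumptions and should point at this model's actual repo and zip file:

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

checkpoint = load_from_hub(
    repo_id="<user>/ppo-LunarLander-v2",  # placeholder
    filename="ppo-LunarLander-v2.zip",    # placeholder
)
model = PPO.load(checkpoint)
```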
Bhuvana/t5-base-spellchecker
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
93
2023-04-21T19:38:37Z
---
language: en
tags:
- transformer
license: apache-2.0
---

# Sandbox

I am following [this](https://machinelearningmastery.com/building-transformer-models-with-attention-crash-course-build-a-neural-machine-translator-in-12-days/) tutorial to figure out how to implement a transformer.
Bia18/Beatriz
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1323391305834143745/4zqOJh66_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Farza 🇵🇰🇺🇸</div> <div style="text-align: center; font-size: 14px;">@farzatv</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Farza 🇵🇰🇺🇸. | Data | Farza 🇵🇰🇺🇸 | | --- | --- | | Tweets downloaded | 3246 | | Retweets | 69 | | Short tweets | 787 | | Tweets kept | 2390 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/izd8flbr/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @farzatv's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/qz26zcpj) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/qz26zcpj/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/farzatv') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
BigSalmon/BertaMyWorda
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-04-21T19:51:02Z
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1642647714549710854/QlI3xw3I_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">kache (yacine)</div> <div style="text-align: center; font-size: 14px;">@yacinemtb</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from kache (yacine). | Data | kache (yacine) | | --- | --- | | Tweets downloaded | 3200 | | Retweets | 266 | | Short tweets | 690 | | Tweets kept | 2244 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/6ds0n54s/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @yacinemtb's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/t1ivll23) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/t1ivll23/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/yacinemtb') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
BigSalmon/GPT2HardArticleEasyArticle
[ "pytorch", "jax", "tensorboard", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
---
language:
- pt
---

This is the first version of a fine-tuned artificial intelligence that speaks Brazilian Portuguese. It was trained on top of decapoda's LLaMA 7B, using zetavg's LLaMA-LoRA Tuner with the Cabrita LoRA dataset.

Have fun!
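A minimal loading sketch for a LLaMA-7B base plus a LoRA adapter of this kind, via `peft`. The adapter repo id is a placeholder for wherever these weights are hosted:

```python
from peft import PeftModel
from transformers import LlamaForCausalLM, LlamaTokenizer

base_id = "decapoda-research/llama-7b-hf"  # the decapoda LLaMA-7B mentioned above
tokenizer = LlamaTokenizer.from_pretrained(base_id)
base = LlamaForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base, "<this-adapter-repo>")  # placeholder adapter id
```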
BigSalmon/GPTIntro
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: t5-base-finetuned-context-dataset results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-base-finetuned-context-dataset This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.11.3 - Pytorch 1.9.0+cu111 - Datasets 2.11.0 - Tokenizers 0.10.3
BigSalmon/GPTNeo350MInformalToFormalLincoln3
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
---
license: creativeml-openrail-m
tags:
- stable-diffusion
- text-to-image
pipeline_tag: text-to-image
---

<b>Introduction:</b>

CharHelper_Fine_Tuned_V2 has been trained with SD 2.1 as a base at 768x768 resolution as an update to the previous version. It has additional training on anthropomorphism, dinosaurs, reptiles, animals, aquatic creatures, ninjas, wrestlers, food, diners, gardens, and fairgrounds.
<br />

## Usage:

The CFG Scale is much less sensitive in this version and can achieve good results between 4 and 9. I recommend using the [Dynamic Thresholding Extension](https://github.com/mcmonkeyprojects/sd-dynamic-thresholding) for this model. It becomes much more coherent when it is enabled with the following settings:

![Settings](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/DTSettings.JPG)

This model can also benefit from the [Unprompted Extension's](https://github.com/ThereforeGames/unprompted) zoom_enhance tool, as it likes to output longer-range images.

<b>Use Auto for the VAE in settings. If you are using a VAE based on an SDv1.5 model, you may not get the best results.</b>
<br />

Prompts work better when using complete sentences vs. the SDv1.x "8k, intricate, etc." type of format. Keywords are not necessary, but I've kept the options for them open. Play around with mixing them up for interesting outputs. They work best with the [Prompt Editing Feature](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#prompt-editing), which lets the generation focus on the keywords for the first 20% before they are removed, so the image does not get too chaotic, or vice versa. Using Prompt Editing for artist names has also given good results.

<b>Keywords:</b>

<b>Character Styles:</b> CHV3CWrestler, CHV3CReptile, CHV3CAnimal, CHV3CNinja, CHV3CAnthro, CHV3CDino, CHV3CFoodPorn, CHV3CDeepSea, CHV3CBigChief, CHV3CBoxer, CHV3CUrban, CHV3COrc, CHV3CGanesh, CHV3CGolem, CHV3CCyberpunk, CHV3CSamurai, CHV3CRobot, CHV3CZombie, CHV3CBird, CHV3MDragon, CHV3CKnight, CHV3CWizard, CHV3CBarb, CHV3CVehicle, CHV3CTroll, CHV3CReaper, CHV3CRogue, CHV3CAlien

<b>Scenery/Styles:</b> CHV3SDiner, CHV3SGarden, CHV3SFair, CHV3SUrban, CHV3SEldritch, CHV3SLighthouse, CHV3SCute, CHV3SMacro, CHV3SSciFi, CHV3SWorld

## Examples:

![Meerkats](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/01289-3929403383-a%20meerkat%20with%20soft%20silky%20fur%20standing%20on%20top%20of%20a%20counter%20in%20an%20indian%20marketplace%2C%20doing%20a%20majestic%20pose%2C%20taken%20in%20an%20Indian%20b.png)

<b>Meerkats</b>

a meerkat with soft silky fur standing on top of a counter in an indian marketplace, doing a majestic pose, taken in an Indian bazaar, standing tall, CHV3CAnimal, perfect composition, Professional, masterpiece, commissioned, best quality, Color Corrected, fixed in post, emended, ameliorated, idyllic

Negative prompt: unibrow, text, logo, signature, over-saturated, over-exposed, amateur, extra limbs, extra barrel, b&w, close-up, duplicate, mutilated, extra fingers, mutated hands, deformed, blurry, bad proportions, extra limbs, cloned face, out of frame, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, tripod, tube, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blurry, bad art, bad
anatomy Steps: 10, Sampler: DPM++ SDE, CFG scale: 4, Seed: 3929403383, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3 ![Elves](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/01016-3860491171-a%20portrait%20of%20a%20dryad%20elf%20queen%20with%20green%20hair%20and%20wooden%20attire%2C%20%5B_beautiful%20face%2C_.25%5D%20fey%20queen%20of%20the%20summer%20forest%2C%208k%20stu.png) <b>Elves</b> a portrait of a dryad elf queen with green hair and wooden attire, [:beautiful face,:.25] fey queen of the summer forest, 8k stunning artwork, perfect composition, Professional, masterpiece, commissioned, best quality, Color Corrected, fixed in post, emended, ameliorated, idyllic Negative prompt: unibrow, text, logo, signature, over-saturated, over-exposed, amateur, extra limbs, extra barrel, b&w, close-up, duplicate, mutilated, extra fingers, mutated hands, deformed, blurry, bad proportions, extra limbs, cloned face, out of frame, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, tripod, tube, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blurry, bad art, bad anatomy, nfixer Steps: 10, Sampler: DPM++ SDE, CFG scale: 6, Seed: 3860491171, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3 ![Big Fountains](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/00976-1146635289-The%20statue%20face%20of%20a%20woman%2C%20perfect%20composition%2C%20Professional%2C%20masterpiece%2C%20commissioned%2C%20best%20quality%2C%20Color%20Corrected%2C%20fixed%20i.png) <b>Big Fountains</b> Dynamic Thresholding Enabled<br /> zoom_enhance Enabled a beautiful baroque statue of an angel on top of a fountain inside a royal greenhouse garden, angel statues, ornate and flowing, fountain in the middle, beautiful image, CHV3SGarden, perfect composition, Professional, masterpiece, commissioned, best quality, Color Corrected, fixed in post, emended, ameliorated, idyllic Negative prompt: framed, cropped, over-exposed, over-saturated, amateur, (b&w), (close-up), (duplicate), (deformed), blurry, (bad proportions), gross proportions, ugly, tiling, poorly drawn, mutation, mutated, disfigured, deformed, out of frame, blurry, bad art, text, logo, signature, watermark, cross-eyes Steps: 10, Sampler: DPM++ SDE, CFG scale: 7.0, Seed: 66984331, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3, Dynamic thresholding enabled: True, Mimic scale: 5.5, Threshold percentile: 98.2, Mimic mode: Half Cosine Down, Mimic scale minimum: 4, CFG mode: Half Cosine Down, CFG scale minimum: 4, Score: 6.63 ![Muppets At The Diner](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/00564-3898371140-two%20muppets%20eating%20at%20a%20diner%2C%20hyper%20detailed%2C%20studio%20quality%2C%20%5BCHV3CDiner%2C__.10%5D%20perfect%20composition%2C%20Professional%2C%20masterpiece.png) <b>Muppets At The Diner</b> two muppets eating at a diner, hyper detailed, studio quality, [CHV3CDiner,::.10] perfect composition, Professional, masterpiece, commissioned, best quality, Color Corrected, fixed in post, emended, ameliorated, idyllic, Professional, masterpiece, commissioned, best quality, Color Corrected, fixed in post, emended, ameliorated, idyllic Negative prompt: framed, cropped, over-exposed, over-saturated, amateur, (b&w), (close-up), (duplicate), 
(deformed), blurry, (bad proportions), gross proportions, ugly, tiling, poorly drawn, mutation, mutated, disfigured, deformed, out of frame, blurry, bad art, text, logo, signature, watermark, cross-eyes Steps: 10, Sampler: DPM++ SDE, CFG scale: 6.5, Seed: 3898371140, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3 ![Colorful Fish](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/00405-3100432446-a%20realistically%20detailed%20portrait%20of%20a%20beautiful%20giant%20colorful%20betta%20fish%20swimming%20in%20the%20water%20near%20a%20coral%20reef%2C%20perfect%20arti.png) <b>Colorful Fish</b> a realistically detailed portrait of a beautiful giant colorful betta fish swimming in the water near a coral reef, perfect artistic composition, Professional, masterpiece, commissioned, best quality, Color Corrected, fixed in post, emended, ameliorated, idyllic Negative prompt: text, logo, signature, over-saturated, over-exposed, amateur, extra limbs, extra barrel, b&w, close-up, duplicate, mutilated, extra fingers, mutated hands, deformed, blurry, bad proportions, extra limbs, cloned face, out of frame, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, tripod, tube, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blurry, bad art, bad anatomy Steps: 10, Sampler: DPM++ SDE, CFG scale: 3.5, Seed: 3100432446, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3 ![Anthropomorphic Alligators](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/00270-736097322-an%20image%20of%20an%20anthropomorphic%20alligator%20in%20a%20cowboy%20costume%20in%20a%20bioluminescent%20swamp%2C%20concept%20art%2C%20photogrammetry%2C%20jurassic%20im.png) <b>Anthropomorphic Alligators</b> Dynamic Thresholding Enabled an image of an anthropomorphic alligator in a cowboy costume in a bioluminescent swamp, concept art, photogrammetry, jurassic image, gnomon, official product photo, hybrid human/anthro, intimidating appearance, CHV3CAnthro, perfect composition, Professional, masterpiece, commissioned, best quality, Color Corrected, fixed in post, emended, ameliorated, idyllic Negative prompt: text, logo, signature, over-saturated, over-exposed, amateur, extra limbs, extra barrel, b&w, close-up, duplicate, mutilated, extra fingers, mutated hands, deformed, blurry, bad proportions, extra limbs, cloned face, out of frame, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, tripod, tube, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blurry, bad art, bad anatomy Steps: 10, Sampler: DPM++ SDE, CFG scale: 9.5, Seed: 736097322, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3, Dynamic thresholding enabled: True, Mimic scale: 5.5, Threshold percentile: 100, Score: 7.0 ![Dragons](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/00182-511226435-a%20medium%20range%20shot%20of%20a%20red%20dragon%20flying%20through%20the%20air%2C%20Wings%20outstretched%2C%20sharp%20focus%2C%20an%20illustration%2C%20monster%20creature%20c.png) <b>Dragons</b> a medium range shot of a red 
dragon flying through the air, Wings outstretched, sharp focus, an illustration, monster creature concept art, fantasy concept art, perfect composition, Professional, masterpiece, commissioned, best quality, Color Corrected, fixed in post, emended, ameliorated, idyllic Negative prompt: text, logo, signature, over-saturated, over-exposed, amateur, extra limbs, extra barrel, b&w, close-up, duplicate, mutilated, extra fingers, mutated hands, deformed, blurry, bad proportions, extra limbs, cloned face, out of frame, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, tripod, tube, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blurry, bad art, bad anatomy Steps: 10, Sampler: DPM++ SDE, CFG scale: 7.5, Seed: 511226435, Size: 768x896, Model hash: 6b5ef03039, Denoising strength: 0.72, ENSD: 3, Mask blur: 4 ![Dinosaurs](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/00749-4026615536-an%20illustration%20of%20a%20tyrannosaurus%20rex%20in%20a%20forest%2C%20auto-destructive%20art%2C%20closeup%2C%20ancient%20magus%2C%20jenna%20barton%2C%20drawn%20and%20painte.png) <b>Dinosaurs</b> an illustration of a tyrannosaurus rex in a forest, auto-destructive art, closeup, ancient magus, jenna barton, drawn and painted, rotoscoped, full res, CHV3CDino, perfect composition, Professional, masterpiece, commissioned, best quality, Color Corrected, fixed in post, emended, ameliorated, idyllic Negative prompt: framed, cropped, over-exposed, over-saturated, amateur, (b&w), (close-up), (duplicate), (deformed), blurry, (bad proportions), gross proportions, ugly, tiling, poorly drawn, mutation, mutated, disfigured, deformed, out of frame, blurry, bad art, text, logo, signature, watermark, cross-eyes Steps: 10, Sampler: DPM++ SDE, CFG scale: 6.5, Seed: 4026615536, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3 ![Pancakes](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/00857-4020971795-a%20towering%20landscape%20of%20pancakes%20dripping%20with%20maple%20syrup%20and%20blueberries%20on%20a%20table%2C%20autumn%20wind%2C%20contest%20winner%202021%2C%20%F0%9F%8E%80%20%F0%9F%8D%93%20%F0%9F%A7%9A%2C.png) <b>Pancakes</b> Dynamic Thresholding Enabled a towering landscape of pancakes dripping with maple syrup and blueberries on a table, autumn wind, contest winner 2021, 🎀 🍓 🧚, harvest, sofya emelenko, (sweet night ambient, bokeh lights in the background:1.1), CHV3CFoodPorn, perfect composition, Professional, masterpiece, commissioned, best quality, Color Corrected, fixed in post, emended, ameliorated, idyllic Negative prompt: framed, cropped, over-exposed, over-saturated, amateur, (b&w), (close-up), (duplicate), (deformed), blurry, (bad proportions), gross proportions, ugly, tiling, poorly drawn, mutation, mutated, disfigured, deformed, out of frame, blurry, bad art, text, logo, signature, watermark, cross-eyes Steps: 10, Sampler: DPM++ SDE, CFG scale: 4, Seed: 4020971795, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3, Dynamic thresholding enabled: True, Mimic scale: 4, Threshold percentile: 98.2, Mimic mode: Half Cosine Down, Mimic scale minimum: 4, CFG mode: Half Cosine Down, CFG scale minimum: 4 
![Gardens](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/00878-3646861612-a%20garden%20path%20with%20a%20tunnel%20of%20glowing%20flowers%20at%20night%2C%20blossoming%20path%20to%20heaven%2C%20floral%20environment%2C%20beautiful%20scene%2C%20CHV3SGa.png) <b>Gardens</b> Dynamic Thresholding Enabled a garden path with a tunnel of glowing flowers at night, blossoming path to heaven, floral environment, beautiful scene, CHV3SGarden, perfect composition, Professional, masterpiece, commissioned, best quality, Color Corrected, fixed in post, emended, ameliorated, idyllic Negative prompt: framed, cropped, over-exposed, over-saturated, amateur, (b&w), (close-up), (duplicate), (deformed), blurry, (bad proportions), gross proportions, ugly, tiling, poorly drawn, mutation, mutated, disfigured, deformed, out of frame, blurry, bad art, text, logo, signature, watermark, cross-eyes Steps: 10, Sampler: DPM++ SDE, CFG scale: 4.0, Seed: 3646861612, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3, Dynamic thresholding enabled: True, Mimic scale: 4, Threshold percentile: 98.2, Mimic mode: Half Cosine Down, Mimic scale minimum: 4, CFG mode: Half Cosine Down, CFG scale minimum: 4 ![Ninjas](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/01221-140919870-a%20realistic%20detail%20of%20a%20waist%20up%20portrait%20of%20a%20%5BCHV3CNinja__.25%5D%20person%20in%20an%20ornate%20royal%20cyberpunk%20beautiful%20silver%20and%20white.png) <b>Ninjas</b> a realistic detail of a waist up portrait of a [CHV3CNinja::.25] person in an ornate royal cyberpunk beautiful silver and white porcelain ninja outfit in an alley in neo-Tokyo, perfect composition, Professional, masterpiece, commissioned, best quality, Color Corrected, fixed in post, emended, ameliorated, idyllic Negative prompt: unibrow, text, logo, signature, over-saturated, over-exposed, amateur, extra limbs, extra barrel, b&w, close-up, duplicate, mutilated, extra fingers, mutated hands, deformed, blurry, bad proportions, extra limbs, cloned face, out of frame, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, tripod, tube, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blurry, bad art, bad anatomy Steps: 14, Sampler: DPM++ SDE, CFG scale: 9.5, Seed: 140919870, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3 ![The Fair](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/01357-547939668-a%20realistic%20studio%20ghibli%20anime%20style%20illustration%20of%20a%20retro%20futuristic%20carnival%20ride%20with%20many%20people%20in%20it%20at%20night%20at%20a%20crow.png) <b>The Fair</b> a realistic studio ghibli anime style illustration of a retro futuristic carnival ride with many people in it at night at a crowded fair filled with amusement park attractions and a giant ferris wheel lit up in the background, artwork by wlop, [CHV3SFair, :CHV3CVehicle, CHV3CRobot style architecture, :.35] perfect composition, Professional, masterpiece, commissioned, best quality, Color Corrected, fixed in post, emended, ameliorated, idyllic Negative prompt: unibrow, text, logo, signature, over-saturated, over-exposed, amateur, extra limbs, extra barrel, b&w, close-up, duplicate, mutilated, extra fingers, mutated hands, deformed, blurry, bad proportions, extra 
limbs, cloned face, out of frame, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, tripod, tube, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blurry, bad art, bad anatomy Steps: 20, Sampler: DPM++ SDE, CFG scale: 7, Seed: 547939668, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3 ![Wrestling](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/01430-3985005664-two%20men%20are%20fighting%20in%20a%20wrestling%20ring%2C%20jonathan%20winterhart%2C%20taken%20in%20the%20early%202020s%2C%20clayton%20crain%2C%20aaron%20earley%2C%20majestic%20s.png) <b>Wrestling</b> two men are fighting in a wrestling ring, jonathan winterhart, taken in the early 2020s, clayton crain, aaron earley, majestic sweeping action, awestriking, photo still, ny, CHV3CWrestler, picture taken in the early 1990s, perfect composition, Professional, masterpiece, commissioned, best quality, Color Corrected, fixed in post, emended, ameliorated, idyllic Negative prompt: unibrow, text, logo, signature, over-saturated, over-exposed, amateur, extra limbs, extra barrel, b&w, close-up, duplicate, mutilated, extra fingers, mutated hands, deformed, blurry, bad proportions, extra limbs, cloned face, out of frame, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, tripod, tube, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blurry, bad art, bad anatomy Steps: 20, Sampler: DPM++ SDE, CFG scale: 7, Seed: 3985005664, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3 ![The Reaper](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/01634-1931457294-a%20waist%20up%20skull%20faced%20portrait%20of%20an%20evil%20demented%20CHV3CZombie%2C%20CHV3CReaper%20style%20zombie%20priest%20of%20death%20adorned%20in%20ornate%20roya.png) <b>The Reaper</b> Dynamic Thresholding Enabled a waist up skull faced portrait of an evil demented CHV3CZombie, CHV3CReaper style zombie priest of death adorned in ornate royal black robes and a Papal tiara at a sinister crypt altar, candles, and roses, high resolution, award-winning picture in the style of the diablo video game franchise, centered, perfect composition, Professional, masterpiece, commissioned, best quality, Color Corrected, fixed in post, emended, ameliorated, idyllic Negative prompt: text, logo, signature, over-saturated, over-exposed, amateur, extra limbs, extra barrel, b&w, close-up, duplicate, mutilated, extra fingers, mutated hands, deformed, blurry, bad proportions, extra limbs, cloned face, out of frame, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, tripod, tube, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blurry, bad art, bad anatomy Steps: 20, Sampler: DPM++ SDE, CFG scale: 8, Seed: 1931457294, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3, Dynamic thresholding enabled: True, Mimic 
scale: 5.5, Threshold percentile: 98.75, Mimic mode: Half Cosine Down, Mimic scale minimum: 3, CFG mode: Half Cosine Down, CFG scale minimum: 3 ![Golems](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/01643-4274520324-A%20steampunk%20robot%20golem%20made%20out%20of%20royal%20armor%20and%20large%20gears%2C%20CHV3CGolem%2C%20CHV3CRobot%2C%20reasonable%20fantasy%2C%20realistic%2C%20detailed.png) <b>Golems</b> Dynamic Thresholding Enabled A steampunk robot golem made out of royal armor and large gears, CHV3CGolem, CHV3CRobot, reasonable fantasy, realistic, detailed, tabletop rpg, ghostblade, wlop and tooth wu, perfect composition, Professional, masterpiece, commissioned, best quality, Color Corrected, fixed in post, emended, ameliorated, idyllic Negative prompt: text, logo, signature, over-saturated, over-exposed, amateur, extra limbs, extra barrel, b&w, close-up, duplicate, mutilated, extra fingers, mutated hands, deformed, blurry, bad proportions, extra limbs, cloned face, out of frame, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, tripod, tube, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blurry, bad art, bad anatomy Steps: 20, Sampler: DPM++ SDE, CFG scale: 16.5, Seed: 4274520324, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3, Dynamic thresholding enabled: True, Mimic scale: 5.5, Threshold percentile: 98.75, Mimic mode: Half Cosine Down, Mimic scale minimum: 3, CFG mode: Half Cosine Down, CFG scale minimum: 3 ![Ornate Headwear](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/01781-1167136804-a%20idyllic%20commissioned%20ameliorated%20masterpiece%20of%20the%20best%20quality%20of%20a%20Professional%20realistic%20detail%20with%20Color%20Corrected%2C%20perf.png) <b>Ornate Headwear</b> Dynamic Thresholding Enabled a idyllic commissioned ameliorated masterpiece of the best quality of a Professional realistic detail with Color Corrected, perfect composition and soft tones of (an analog photograph of a female shaman [:with beautiful eyes:.25] wearing wooden CHV3CBarb style tribal armor and a [afro-dieselpunk tribal:cyberpunk:.45] feather headdress) Negative prompt: text, logo, signature, over-saturated, over-exposed, amateur, extra limbs, extra barrel, b&w, close-up, duplicate, mutilated, extra fingers, mutated hands, deformed, blurry, bad proportions, extra limbs, cloned face, out of frame, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, tripod, tube, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blurry, bad art, bad anatomy Steps: 20, Sampler: Euler a, CFG scale: 4, Seed: 1167136804, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3, Dynamic thresholding enabled: True, Mimic scale: 9, Threshold percentile: 95, Mimic mode: Half Cosine Down, Mimic scale minimum: 0, CFG mode: Half Cosine Down, CFG scale minimum: 9 
![Luchadores](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/01809-1615347830-a%20idyllic%20commissioned%20ameliorated%20masterpiece%20of%20the%20best%20quality%20of%20a%20Professional%20realistic%20detail%20with%20Color%20Corrected%2C%20perf.png) <b>Luchadores</b> Dynamic Thresholding Enabled a idyllic commissioned ameliorated masterpiece of the best quality of a Professional realistic detail with Color Corrected, perfect composition and soft tones of (Armor King is a wrestler with the head of a leopard [:with scary eyes:.25] wearing [uturistic:tribal:.45] style gear whilst in a victory pose in the [CHV3CWrestler style::.25] wrestling ring) Negative prompt: text, logo, signature, over-saturated, over-exposed, amateur, extra limbs, extra barrel, b&w, close-up, duplicate, mutilated, extra fingers, mutated hands, deformed, blurry, bad proportions, extra limbs, cloned face, out of frame, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, tripod, tube, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blurry, bad art, bad anatomy Steps: 20, Sampler: Euler a, CFG scale: 8.5, Seed: 1615347830, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3, Dynamic thresholding enabled: True, Mimic scale: 1.5, Threshold percentile: 95, Mimic mode: Half Cosine Down, Mimic scale minimum: 0, CFG mode: Half Cosine Down, CFG scale minimum: 0 ![Racoon Chefs](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/01820-1723773295-a%20idyllic%20commissioned%20ameliorated%20masterpiece%20of%20the%20best%20quality%20of%20a%20Professional%20realistic%20detail%20with%20Color%20Corrected%2C%20perf.png) <b>Racoon Chefs</b> Dynamic Thresholding Enabled a idyllic commissioned ameliorated masterpiece of the best quality of a Professional realistic detail with Color Corrected, perfect composition and soft tones of (An anthropomorphic person with a racoon head in a chef's costume cooking scallops, wearing a chef hat, in a kitchen) Negative prompt: text, logo, signature, over-saturated, over-exposed, amateur, extra limbs, extra barrel, b&w, close-up, duplicate, mutilated, extra fingers, mutated hands, deformed, blurry, bad proportions, extra limbs, cloned face, out of frame, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, tripod, tube, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blurry, bad art, bad anatomy Steps: 20, Sampler: DPM++ SDE, CFG scale: 6.5, Seed: 1723773295, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3, Dynamic thresholding enabled: True, Mimic scale: 1.5, Threshold percentile: 95, Mimic mode: Half Cosine Down, Mimic scale minimum: 0, CFG mode: Half Cosine Down, CFG scale minimum: 0 ![Octopi? Octopodes? Octopuses?](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/01826-3547502290-a%20idyllic%20commissioned%20ameliorated%20masterpiece%20of%20the%20best%20quality%20of%20a%20Professional%20realistic%20detail%20with%20Color%20Corrected%2C%20perf.png) <b>Octopi? Octopodes? 
Octopuses?</b> Dynamic Thresholding Enabled a idyllic commissioned ameliorated masterpiece of the best quality of a Professional realistic detail with Color Corrected, perfect composition and soft tones of (A cute little octopus in a small circular fish tank) Negative prompt: text, logo, signature, over-saturated, over-exposed, amateur, extra limbs, extra barrel, b&w, close-up, duplicate, mutilated, extra fingers, mutated hands, deformed, blurry, bad proportions, extra limbs, cloned face, out of frame, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, tripod, tube, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blurry, bad art, bad anatomy Steps: 20, Sampler: DPM++ SDE, CFG scale: 7, Seed: 3547502290, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3, Dynamic thresholding enabled: True, Mimic scale: 1.5, Threshold percentile: 95, Mimic mode: Half Cosine Down, Mimic scale minimum: 0, CFG mode: Half Cosine Down, CFG scale minimum: 0 ![Orcs](https://huggingface.co/ManglerFTW/CharHelper_Fine_Tuned_V2/resolve/main/Images/01842-1500008643-an%20idyllic%20commissioned%20ameliorated%20masterpiece%20of%20the%20best%20quality%20of%20a%20Professional%20realistic%20detail%20with%20Color%20Corrected%2C%20per.png) <b>Orcs</b> Dynamic Thresholding Enabled an idyllic commissioned ameliorated masterpiece of the best quality of a Professional realistic detail with Color Corrected, perfect composition and soft tones of (a waist up portrait of a CHV3COrc orc in the mountains) Negative prompt: text, logo, signature, over-saturated, over-exposed, amateur, extra limbs, extra barrel, b&w, close-up, duplicate, mutilated, extra fingers, mutated hands, deformed, blurry, bad proportions, extra limbs, cloned face, out of frame, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, tripod, tube, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blurry, bad art, bad anatomy Steps: 20, Sampler: DPM++ SDE, CFG scale: 7, Seed: 1500008643, Size: 768x896, Model hash: 6b5ef03039, ENSD: 3, Dynamic thresholding enabled: True, Mimic scale: 1.5, Threshold percentile: 95, Mimic mode: Half Cosine Down, Mimic scale minimum: 0, CFG mode: Half Cosine Down, CFG scale minimum: 0
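
## Loading with Diffusers:

The examples above were generated in the AUTOMATIC1111 web UI. As a rough alternative, here is a minimal 🤗 Diffusers sketch. It assumes diffusers-format weights are published in this repo and that your diffusers version ships `DPMSolverSDEScheduler` (the DPM++ SDE sampler used above); if only a single checkpoint file is available, load it in the web UI instead.

```python
import torch
from diffusers import StableDiffusionPipeline, DPMSolverSDEScheduler

# Assumes diffusers-format weights exist at this repo id.
pipe = StableDiffusionPipeline.from_pretrained(
    "ManglerFTW/CharHelper_Fine_Tuned_V2", torch_dtype=torch.float16
)
# Match the DPM++ SDE sampler used in the example settings above.
pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

image = pipe(
    "a garden path with a tunnel of glowing flowers at night, CHV3SGarden, perfect composition",
    negative_prompt="framed, cropped, over-exposed, over-saturated, blurry, bad art, watermark",
    width=768,
    height=896,
    num_inference_steps=20,
    guidance_scale=7.0,
).images[0]
image.save("charhelper_sample.png")
```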
BigSalmon/GPTNeo350MInformalToFormalLincoln6
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
2023-04-21T20:50:10Z
---
license: apache-2.0
tags:
- generated_from_keras_callback
model-index:
- name: taebinkim/distilbert-base-uncased-finetuned-cola
  results: []
---

<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->

# taebinkim/distilbert-base-uncased-finetuned-cola

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 0.1922
- Validation Loss: 0.5547
- Train Matthews Correlation: 0.5294
- Epoch: 2

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 1602, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False}
- training_precision: float32

### Training results

| Train Loss | Validation Loss | Train Matthews Correlation | Epoch |
|:----------:|:---------------:|:--------------------------:|:-----:|
| 0.5174     | 0.4663          | 0.4685                     | 0     |
| 0.3252     | 0.4865          | 0.4966                     | 1     |
| 0.1922     | 0.5547          | 0.5294                     | 2     |

### Framework versions

- Transformers 4.28.1
- TensorFlow 2.12.0
- Datasets 2.11.0
- Tokenizers 0.13.3
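
## Inference example

A minimal inference sketch (this assumes the tokenizer was uploaded with the checkpoint, and that label 1 follows the usual CoLA convention of "grammatically acceptable"):

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

model_id = "taebinkim/distilbert-base-uncased-finetuned-cola"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = TFAutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("The book was read by the student.", return_tensors="tf")
logits = model(**inputs).logits
print(int(tf.argmax(logits, axis=-1)[0]))  # 0 = unacceptable, 1 = acceptable (assumed mapping)
```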
BigSalmon/GPTT
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---
# Model card for tresnet_l.miil_in1k

A TResNet image classification model. Trained on ImageNet-1k by paper authors.

The weights for this model have been remapped and modified from the originals to work with standard BatchNorm instead of InplaceABN. `inplace_abn` can be problematic to build recently and ends up slower with `memory_format=channels_last`, torch.compile(), etc.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 56.0
  - GMACs: 10.9
  - Activations (M): 11.9
  - Image size: 224 x 224
- **Papers:**
  - TResNet: High Performance GPU-Dedicated Architecture: https://arxiv.org/abs/2003.13630
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/Alibaba-MIIL/TResNet

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('tresnet_l.miil_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tresnet_l.miil_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 76, 56, 56])
    #  torch.Size([1, 152, 28, 28])
    #  torch.Size([1, 1216, 14, 14])
    #  torch.Size([1, 2432, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tresnet_l.miil_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 2432, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Citation
```bibtex
@misc{ridnik2020tresnet,
      title={TResNet: High Performance GPU-Dedicated Architecture},
      author={Tal Ridnik and Hussam Lawen and Asaf Noy and Itamar Friedman},
      year={2020},
      eprint={2003.13630},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
BigSalmon/GoodMaskResults
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---
# Model card for tresnet_l.miil_in1k_448

A TResNet image classification model. Trained on ImageNet-1k by paper authors.

The weights for this model have been remapped and modified from the originals to work with standard BatchNorm instead of InplaceABN. `inplace_abn` can be problematic to build recently and ends up slower with `memory_format=channels_last`, torch.compile(), etc.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 56.0
  - GMACs: 43.6
  - Activations (M): 47.6
  - Image size: 448 x 448
- **Papers:**
  - TResNet: High Performance GPU-Dedicated Architecture: https://arxiv.org/abs/2003.13630
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/Alibaba-MIIL/TResNet

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('tresnet_l.miil_in1k_448', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tresnet_l.miil_in1k_448',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 76, 112, 112])
    #  torch.Size([1, 152, 56, 56])
    #  torch.Size([1, 1216, 28, 28])
    #  torch.Size([1, 2432, 14, 14])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tresnet_l.miil_in1k_448',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 2432, 14, 14) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Citation
```bibtex
@misc{ridnik2020tresnet,
      title={TResNet: High Performance GPU-Dedicated Architecture},
      author={Tal Ridnik and Hussam Lawen and Asaf Noy and Itamar Friedman},
      year={2020},
      eprint={2003.13630},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
BigSalmon/InformalToFormalLincoln14
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---
# Model card for tresnet_m.miil_in1k

A TResNet image classification model. Trained on ImageNet-1k by paper authors.

The weights for this model have been remapped and modified from the originals to work with standard BatchNorm instead of InplaceABN. `inplace_abn` can be problematic to build recently and ends up slower with `memory_format=channels_last`, torch.compile(), etc.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 31.4
  - GMACs: 5.8
  - Activations (M): 7.3
  - Image size: 224 x 224
- **Papers:**
  - TResNet: High Performance GPU-Dedicated Architecture: https://arxiv.org/abs/2003.13630
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/Alibaba-MIIL/TResNet

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('tresnet_m.miil_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tresnet_m.miil_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 64, 56, 56])
    #  torch.Size([1, 128, 28, 28])
    #  torch.Size([1, 1024, 14, 14])
    #  torch.Size([1, 2048, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tresnet_m.miil_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 2048, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Citation
```bibtex
@misc{ridnik2020tresnet,
      title={TResNet: High Performance GPU-Dedicated Architecture},
      author={Tal Ridnik and Hussam Lawen and Asaf Noy and Itamar Friedman},
      year={2020},
      eprint={2003.13630},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
BigSalmon/InformalToFormalLincoln17
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 256.01 +/- 21.90
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)
A minimal loading sketch; the repo id and filename below are placeholders for this model's actual checkpoint path:

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholder repo_id/filename -- substitute the actual repository and zip name.
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
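
A quick sanity check of the reported score (this sketch assumes `gym[box2d]` is installed and `model` was loaded as above):

```python
import gym
from stable_baselines3.common.evaluation import evaluate_policy

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")  # should land near the reported 256
```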
BigSalmon/InformalToFormalLincoln20
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
- imagenet-21k-p
---
# Model card for tresnet_m.miil_in21k_ft_in1k

A TResNet image classification model. Pretrained on ImageNet-21K-P ("ImageNet-21K Pretraining for the Masses", a 11k subset of ImageNet-22k) and fine-tuned on ImageNet-1k by paper authors.

The weights for this model have been remapped and modified from the originals to work with standard BatchNorm instead of InplaceABN. `inplace_abn` can be problematic to build recently and ends up slower with `memory_format=channels_last`, torch.compile(), etc.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 31.4
  - GMACs: 5.8
  - Activations (M): 7.3
  - Image size: 224 x 224
- **Papers:**
  - TResNet: High Performance GPU-Dedicated Architecture: https://arxiv.org/abs/2003.13630
  - ImageNet-21K Pretraining for the Masses: https://arxiv.org/abs/2104.10972
- **Dataset:** ImageNet-1k
- **Pretrain Dataset:** ImageNet-21K-P
- **Original:**
  - https://github.com/Alibaba-MIIL/TResNet
  - https://github.com/Alibaba-MIIL/ImageNet21K

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('tresnet_m.miil_in21k_ft_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tresnet_m.miil_in21k_ft_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 64, 56, 56])
    #  torch.Size([1, 128, 28, 28])
    #  torch.Size([1, 1024, 14, 14])
    #  torch.Size([1, 2048, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tresnet_m.miil_in21k_ft_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 2048, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Citation
```bibtex
@misc{ridnik2020tresnet,
      title={TResNet: High Performance GPU-Dedicated Architecture},
      author={Tal Ridnik and Hussam Lawen and Asaf Noy and Itamar Friedman},
      year={2020},
      eprint={2003.13630},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
```bibtex
@misc{ridnik2021imagenet21k,
      title={ImageNet-21K Pretraining for the Masses},
      author={Tal Ridnik and Emanuel Ben-Baruch and Asaf Noy and Lihi Zelnik-Manor},
      year={2021},
      eprint={2104.10972},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
BigSalmon/InformalToFormalLincoln22
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
- imagenet-21k-p
---
# Model card for tresnet_v2_l.miil_in21k_ft_in1k

A TResNet image classification model. Pretrained on ImageNet-21K-P ("ImageNet-21K Pretraining for the Masses", a 11k subset of ImageNet-22k) and fine-tuned on ImageNet-1k by paper authors.

The weights for this model have been remapped and modified from the originals to work with standard BatchNorm instead of InplaceABN. `inplace_abn` can be problematic to build recently and ends up slower with `memory_format=channels_last`, torch.compile(), etc.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 46.2
  - GMACs: 8.8
  - Activations (M): 16.3
  - Image size: 224 x 224
- **Papers:**
  - TResNet: High Performance GPU-Dedicated Architecture: https://arxiv.org/abs/2003.13630
  - ImageNet-21K Pretraining for the Masses: https://arxiv.org/abs/2104.10972
- **Dataset:** ImageNet-1k
- **Pretrain Dataset:** ImageNet-21K-P
- **Original:**
  - https://github.com/Alibaba-MIIL/TResNet
  - https://github.com/Alibaba-MIIL/ImageNet21K

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('tresnet_v2_l.miil_in21k_ft_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tresnet_v2_l.miil_in21k_ft_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 256, 56, 56])
    #  torch.Size([1, 512, 28, 28])
    #  torch.Size([1, 1024, 14, 14])
    #  torch.Size([1, 2048, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tresnet_v2_l.miil_in21k_ft_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 2048, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Citation
```bibtex
@misc{ridnik2020tresnet,
      title={TResNet: High Performance GPU-Dedicated Architecture},
      author={Tal Ridnik and Hussam Lawen and Asaf Noy and Itamar Friedman},
      year={2020},
      eprint={2003.13630},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
```bibtex
@misc{ridnik2021imagenet21k,
      title={ImageNet-21K Pretraining for the Masses},
      author={Tal Ridnik and Emanuel Ben-Baruch and Asaf Noy and Lihi Zelnik-Manor},
      year={2021},
      eprint={2104.10972},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
BigSalmon/InformalToFormalLincoln24
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: led-finetuned-meetings
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# led-finetuned-meetings

This model is a fine-tuned version of [allenai/led-base-16384](https://huggingface.co/allenai/led-base-16384) on the [knkarthick/AMI](https://huggingface.co/datasets/knkarthick/AMI) dataset.
It achieves the following results on the evaluation set:
- Loss: 2.2191
- Rouge2 Precision: 0.141
- Rouge2 Recall: 0.1886
- Rouge2 Fmeasure: 0.1541

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 8
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Rouge2 Precision | Rouge2 Recall | Rouge2 Fmeasure |
|:-------------:|:-----:|:----:|:---------------:|:----------------:|:-------------:|:---------------:|
| 1.964         | 0.63  | 20   | 2.2191          | 0.141            | 0.1886        | 0.1541          |

### Framework versions

- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Datasets 2.11.0
- Tokenizers 0.13.3
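
## Inference example

A minimal summarization sketch. The repo id below is a placeholder, since this card does not state the published model path; note that LED expects global attention on the first token:

```python
import torch
from transformers import AutoTokenizer, LEDForConditionalGeneration

model_id = "<user>/led-finetuned-meetings"  # hypothetical path -- replace with the actual repo
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = LEDForConditionalGeneration.from_pretrained(model_id)

transcript = "PM: Welcome everyone, let's review the remote control design..."  # meeting transcript
inputs = tokenizer(transcript, return_tensors="pt", truncation=True, max_length=16384)

# LED convention: global attention on the first (<s>) token.
global_attention_mask = torch.zeros_like(inputs["input_ids"])
global_attention_mask[:, 0] = 1

summary_ids = model.generate(
    inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    global_attention_mask=global_attention_mask,
    max_length=256,
    num_beams=4,
)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```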
BigSalmon/InformalToFormalLincoln25
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---
# Model card for tresnet_xl.miil_in1k

A TResNet image classification model. Trained on ImageNet-1k by the paper authors.

The weights for this model have been remapped and modified from the originals to work with standard BatchNorm instead of InplaceABN. `inplace_abn` has recently been problematic to build and ends up slower with `memory_format=channels_last`, torch.compile(), etc.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 78.4
  - GMACs: 15.2
  - Activations (M): 15.3
  - Image size: 224 x 224
- **Papers:**
  - TResNet: High Performance GPU-Dedicated Architecture: https://arxiv.org/abs/2003.13630
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/Alibaba-MIIL/TResNet

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import torch
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('tresnet_xl.miil_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tresnet_xl.miil_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 83, 56, 56])
    #  torch.Size([1, 166, 28, 28])
    #  torch.Size([1, 1328, 14, 14])
    #  torch.Size([1, 2656, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tresnet_xl.miil_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 2656, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Citation
```bibtex
@misc{ridnik2020tresnet,
    title={TResNet: High Performance GPU-Dedicated Architecture},
    author={Tal Ridnik and Hussam Lawen and Asaf Noy and Itamar Friedman},
    year={2020},
    eprint={2003.13630},
    archivePrefix={arXiv},
    primaryClass={cs.CV}
}
```
BigSalmon/InformalToFormalLincolnDistilledGPT2
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---
# Model card for tresnet_xl.miil_in1k_448

A TResNet image classification model. Trained on ImageNet-1k by the paper authors.

The weights for this model have been remapped and modified from the originals to work with standard BatchNorm instead of InplaceABN. `inplace_abn` has recently been problematic to build and ends up slower with `memory_format=channels_last`, torch.compile(), etc.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 78.4
  - GMACs: 60.8
  - Activations (M): 61.3
  - Image size: 448 x 448
- **Papers:**
  - TResNet: High Performance GPU-Dedicated Architecture: https://arxiv.org/abs/2003.13630
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/Alibaba-MIIL/TResNet

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import torch
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('tresnet_xl.miil_in1k_448', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tresnet_xl.miil_in1k_448',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 83, 112, 112])
    #  torch.Size([1, 166, 56, 56])
    #  torch.Size([1, 1328, 28, 28])
    #  torch.Size([1, 2656, 14, 14])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tresnet_xl.miil_in1k_448',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 2656, 14, 14) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Citation
```bibtex
@misc{ridnik2020tresnet,
    title={TResNet: High Performance GPU-Dedicated Architecture},
    author={Tal Ridnik and Hussam Lawen and Asaf Noy and Itamar Friedman},
    year={2020},
    eprint={2003.13630},
    archivePrefix={arXiv},
    primaryClass={cs.CV}
}
```
BigSalmon/Lincoln4
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- license: apache-2.0 tags: - classifier - generated_from_trainer datasets: - ag_news metrics: - accuracy model-index: - name: deep_model_09_clasificador-news-2 results: - task: name: Text Classification type: text-classification dataset: name: ag_news type: ag_news config: default split: test args: default metrics: - name: Accuracy type: accuracy value: 0.9033149171270718 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deep_model_09_clasificador-news-2 This model is a fine-tuned version of [albert-base-v2](https://huggingface.co/albert-base-v2) on the ag_news dataset. It achieves the following results on the evaluation set: - Loss: 0.4530 - Accuracy: 0.9033 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.6332 | 1.0 | 715 | 0.4676 | 0.8812 | | 0.5148 | 2.0 | 1430 | 0.4496 | 0.9006 | | 0.3638 | 3.0 | 2145 | 0.4530 | 0.9033 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
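The card above omits a usage snippet; a minimal sketch for the classifier it describes might look like the following. The repo id is a placeholder taken from the card's model name, and the label set follows the ag_news convention (World, Sports, Business, Sci/Tech).

```python
from transformers import pipeline

clf = pipeline(
    "text-classification",
    model="your-username/deep_model_09_clasificador-news-2",  # hypothetical repo id
)

print(clf("NASA announces a new mission to study the outer planets."))
# expected: a Sci/Tech-style label with a confidence score
```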
BigSalmon/MrLincoln12
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1640912030784684032/b_IdaEz7_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Solomon Wycliffe</div> <div style="text-align: center; font-size: 14px;">@solomonwycliffe</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Solomon Wycliffe. | Data | Solomon Wycliffe | | --- | --- | | Tweets downloaded | 265 | | Retweets | 1 | | Short tweets | 21 | | Tweets kept | 243 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/iaqs4n8b/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @solomonwycliffe's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/l8tie1ub) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/l8tie1ub/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/solomonwycliffe') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
BigSalmon/MrLincoln125MNeo
[ "pytorch", "tensorboard", "gpt_neo", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- library_name: diffusers pipeline_tag: text-to-image language: - en tags: - stable diffusion ---
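The card above only declares `library_name: diffusers` and `pipeline_tag: text-to-image`. Assuming the repo hosts a standard Stable Diffusion checkpoint, loading it would look roughly like this sketch (the repo id is a placeholder, not confirmed by the card):

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "your-username/your-sd-model",  # hypothetical repo id
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

image = pipe("a watercolor painting of a lighthouse at dawn").images[0]
image.save("lighthouse.png")
```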
BigSalmon/MrLincoln2
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
---
tags:
- LunarLander-v2
- ppo
- deep-reinforcement-learning
- reinforcement-learning
- custom-implementation
- deep-rl-course
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 68.24 +/- 121.75
      name: mean_reward
      verified: false
---

# PPO Agent Playing LunarLander-v2

This is a trained model of a PPO agent playing LunarLander-v2.

# Hyperparameters
```python
{'exp_name': 'ppo.py',
 'seed': 1,
 'torch_deterministic': True,
 'cuda': True,
 'track': False,
 'wandb_project_name': 'cleanRL',
 'wandb_entity': None,
 'capture_video': False,
 'env_id': 'LunarLander-v2',
 'total_timesteps': 5000000,
 'learning_rate': 0.0001,
 'num_envs': 4,
 'num_steps': 128,
 'anneal_lr': True,
 'gae': True,
 'gamma': 0.99,
 'gae_lambda': 0.95,
 'num_minibatches': 4,
 'update_epochs': 4,
 'norm_adv': True,
 'clip_coef': 0.2,
 'clip_vloss': True,
 'ent_coef': 0.01,
 'vf_coef': 0.5,
 'max_grad_norm': 0.5,
 'target_kl': None,
 'repo_id': 'Emperor/LunarLander-v2-unit8',
 'f': None,
 'batch_size': 512,
 'minibatch_size': 128}
```
BigSalmon/MrLincoln3
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
17
null
Quantized version of this: https://huggingface.co/ausboss/llama-30b-supercot

GPTQ quantization using https://github.com/0cc4m/GPTQ-for-LLaMa for compatibility with 0cc4m's fork of KoboldAI.

Command used to quantize:

```
python llama.py c:\llama-30b-supercot c4 --wbits 4 --true-sequential --groupsize 128 --save_safetensors 4bit-128g.safetensors
```

Evaluation & Score (Lower is better):
* WikiText2: 4.51
* PTB: 17.46
* C4: 6.37

Non-groupsize version is here: https://huggingface.co/tsumeone/llama-30b-supercot-4bit-cuda
BigSalmon/MrLincoln8
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---
# Model card for hardcorenas_a.miil_green_in1k

A HardCoReNAS image classification model. Trained on ImageNet-1k by the paper authors with their "green" recipe.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 5.3
  - GMACs: 0.2
  - Activations (M): 4.4
  - Image size: 224 x 224
- **Papers:**
  - HardCoRe-NAS: Hard Constrained diffeRentiable Neural Architecture Search: https://arxiv.org/abs/2102.11646
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/Alibaba-MIIL/HardCoReNAS

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import torch
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('hardcorenas_a.miil_green_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'hardcorenas_a.miil_green_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 16, 112, 112])
    #  torch.Size([1, 24, 56, 56])
    #  torch.Size([1, 40, 28, 28])
    #  torch.Size([1, 112, 14, 14])
    #  torch.Size([1, 960, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'hardcorenas_a.miil_green_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 960, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Citation
```bibtex
@misc{nayman2021hardcorenas,
    title={HardCoRe-NAS: Hard Constrained diffeRentiable Neural Architecture Search},
    author={Niv Nayman and Yonathan Aflalo and Asaf Noy and Lihi Zelnik-Manor},
    year={2021},
    eprint={2102.11646},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
```
BigSalmon/MrLincolnBerta
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---
# Model card for hardcorenas_b.miil_green_in1k

A HardCoReNAS image classification model. Trained on ImageNet-1k by the paper authors with their "green" recipe.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 5.2
  - GMACs: 0.3
  - Activations (M): 5.1
  - Image size: 224 x 224
- **Papers:**
  - HardCoRe-NAS: Hard Constrained diffeRentiable Neural Architecture Search: https://arxiv.org/abs/2102.11646
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/Alibaba-MIIL/HardCoReNAS

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import torch
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('hardcorenas_b.miil_green_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'hardcorenas_b.miil_green_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 16, 112, 112])
    #  torch.Size([1, 24, 56, 56])
    #  torch.Size([1, 40, 28, 28])
    #  torch.Size([1, 112, 14, 14])
    #  torch.Size([1, 960, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'hardcorenas_b.miil_green_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 960, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Citation
```bibtex
@misc{nayman2021hardcorenas,
    title={HardCoRe-NAS: Hard Constrained diffeRentiable Neural Architecture Search},
    author={Niv Nayman and Yonathan Aflalo and Asaf Noy and Lihi Zelnik-Manor},
    year={2021},
    eprint={2102.11646},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
```
BigSalmon/NEO125InformalToFormalLincoln
[ "pytorch", "gpt_neo", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---
# Model card for hardcorenas_c.miil_green_in1k

A HardCoReNAS image classification model. Trained on ImageNet-1k by the paper authors with their "green" recipe.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 5.5
  - GMACs: 0.3
  - Activations (M): 5.0
  - Image size: 224 x 224
- **Papers:**
  - HardCoRe-NAS: Hard Constrained diffeRentiable Neural Architecture Search: https://arxiv.org/abs/2102.11646
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/Alibaba-MIIL/HardCoReNAS

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import torch
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('hardcorenas_c.miil_green_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'hardcorenas_c.miil_green_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 16, 112, 112])
    #  torch.Size([1, 24, 56, 56])
    #  torch.Size([1, 40, 28, 28])
    #  torch.Size([1, 112, 14, 14])
    #  torch.Size([1, 960, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'hardcorenas_c.miil_green_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 960, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Citation
```bibtex
@misc{nayman2021hardcorenas,
    title={HardCoRe-NAS: Hard Constrained diffeRentiable Neural Architecture Search},
    author={Niv Nayman and Yonathan Aflalo and Asaf Noy and Lihi Zelnik-Manor},
    year={2021},
    eprint={2102.11646},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
```
BigSalmon/Neo
[ "pytorch", "gpt_neo", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---
# Model card for hardcorenas_d.miil_green_in1k

A HardCoReNAS image classification model. Trained on ImageNet-1k by the paper authors with their "green" recipe.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 7.5
  - GMACs: 0.3
  - Activations (M): 4.9
  - Image size: 224 x 224
- **Papers:**
  - HardCoRe-NAS: Hard Constrained diffeRentiable Neural Architecture Search: https://arxiv.org/abs/2102.11646
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/Alibaba-MIIL/HardCoReNAS

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import torch
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('hardcorenas_d.miil_green_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'hardcorenas_d.miil_green_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 16, 112, 112])
    #  torch.Size([1, 24, 56, 56])
    #  torch.Size([1, 40, 28, 28])
    #  torch.Size([1, 112, 14, 14])
    #  torch.Size([1, 960, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'hardcorenas_d.miil_green_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 960, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Citation
```bibtex
@misc{nayman2021hardcorenas,
    title={HardCoRe-NAS: Hard Constrained diffeRentiable Neural Architecture Search},
    author={Niv Nayman and Yonathan Aflalo and Asaf Noy and Lihi Zelnik-Manor},
    year={2021},
    eprint={2102.11646},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
```
BigSalmon/ParaphraseParentheses
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---
# Model card for hardcorenas_e.miil_green_in1k

A HardCoReNAS image classification model. Trained on ImageNet-1k by the paper authors with their "green" recipe.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 8.1
  - GMACs: 0.4
  - Activations (M): 5.6
  - Image size: 224 x 224
- **Papers:**
  - HardCoRe-NAS: Hard Constrained diffeRentiable Neural Architecture Search: https://arxiv.org/abs/2102.11646
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/Alibaba-MIIL/HardCoReNAS

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import torch
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('hardcorenas_e.miil_green_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'hardcorenas_e.miil_green_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 16, 112, 112])
    #  torch.Size([1, 24, 56, 56])
    #  torch.Size([1, 40, 28, 28])
    #  torch.Size([1, 112, 14, 14])
    #  torch.Size([1, 960, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'hardcorenas_e.miil_green_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 960, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Citation
```bibtex
@misc{nayman2021hardcorenas,
    title={HardCoRe-NAS: Hard Constrained diffeRentiable Neural Architecture Search},
    author={Niv Nayman and Yonathan Aflalo and Asaf Noy and Lihi Zelnik-Manor},
    year={2021},
    eprint={2102.11646},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
```
BigSalmon/ParaphraseParentheses2.0
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---
# Model card for hardcorenas_f.miil_green_in1k

A HardCoReNAS image classification model. Trained on ImageNet-1k by the paper authors with their "green" recipe.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 8.2
  - GMACs: 0.4
  - Activations (M): 5.6
  - Image size: 224 x 224
- **Papers:**
  - HardCoRe-NAS: Hard Constrained diffeRentiable Neural Architecture Search: https://arxiv.org/abs/2102.11646
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/Alibaba-MIIL/HardCoReNAS

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import torch
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('hardcorenas_f.miil_green_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'hardcorenas_f.miil_green_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 16, 112, 112])
    #  torch.Size([1, 24, 56, 56])
    #  torch.Size([1, 40, 28, 28])
    #  torch.Size([1, 112, 14, 14])
    #  torch.Size([1, 960, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'hardcorenas_f.miil_green_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 960, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Citation
```bibtex
@misc{nayman2021hardcorenas,
    title={HardCoRe-NAS: Hard Constrained diffeRentiable Neural Architecture Search},
    author={Niv Nayman and Yonathan Aflalo and Asaf Noy and Lihi Zelnik-Manor},
    year={2021},
    eprint={2102.11646},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
```
BigSalmon/Points
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
## This is a conditionally pretrained version of EleutherAI's gpt-neox-20b model.

Conditionally pretrained models are trained the same way as normal large language models, but the training data is prepared in a special way: every training example is prepended with tags or labels that are appropriate for the document, plus a short synopsis (see the example below). This model was trained starting from the public gpt-neox-20b weights on Hugging Face, for about 1 million conditional pretraining examples with a 2048-token context length, drawn from a subsample of the Pile. Thank you to LAION and Stability AI for compute and their support of open-source projects.

## Standard Pretraining

Large amounts of text are tokenized and split up (usually randomly) into chunks that match the maximum context length of the model. Sometimes there is special packaging of the training examples.

## Conditional Pretraining

Every training example is close to the maximum context length and is prepended with descriptive labels that are appropriate for classifying the document. A short synopsis of the training example is also added.

## How to use this model?

You can use this model the same way as any other pretrained model; just prepend your prompt with the tags and the synopsis, as in the example below (a code sketch follows the sample output).

\[history of the world, the enlightenment, scientific breakthroughs, detailed account of history]

\# A short history of the key scientific breakthroughs of the enlightenment

>1. The Scientific Revolution: This was a period in which scientists began to question some basic assumptions about how the universe worked and what it contained. It started with Nicolaus Copernicus (1473–1543), who suggested that the Earth revolved around the Sun rather than vice versa. He also proposed that the planets moved round the Sun as well – an idea called heliocentrism. In 1543, his book On the Revolutions of the Heavenly Spheres was published by Andreas Osiander. Although this work didn't mention Copernicus's name at all, it did contain arguments against Ptolemy's theory on planetary motion. As such, it helped to spread knowledge of Copernicus' ideas throughout Europe.
>
>2. Galileo Galilei (1564–1642): Galileo is best known for discovering the four largest moons of Jupiter using a telescope made out of two lenses. However, he had many other achievements during his life

## Content Warning

Although conditional pretraining helps to align the model with user expectations, there are still many situations where the model could create disturbing or insensitive content, especially at higher temperature settings or with repetition penalties. These models are trained on large amounts of text from the internet and inherit all the good and *bad* ideas that can be found online. Try starting your tags with SFW, and use tags like "rated G", "politically correct", or "sensitive tone" to help the model stay aligned with your desires.
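A minimal generation sketch using the tag + synopsis format described above. It assumes a standard causal-LM checkpoint layout; the repo id is a placeholder, not confirmed by the card.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-username/gpt-neox-20b-conditional"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, device_map="auto")

# Tags/labels first, then a synopsis-style heading, then the text to continue.
prompt = (
    "[history of the world, the enlightenment, scientific breakthroughs, "
    "detailed account of history]\n"
    "# A short history of the key scientific breakthroughs of the enlightenment\n"
)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```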
BigSalmon/SimplifyText
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
17
null
--- license: mit tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: deberta-v3-large-finetuned-ner results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deberta-v3-large-finetuned-ner This model is a fine-tuned version of [microsoft/deberta-v3-large](https://huggingface.co/microsoft/deberta-v3-large) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.4130 - Precision: 0.8219 - Recall: 0.8955 - F1: 0.8571 - Accuracy: 0.9310 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 45 | 1.0375 | 0.4072 | 0.2743 | 0.3278 | 0.7192 | | No log | 2.0 | 90 | 0.7673 | 0.4724 | 0.3914 | 0.4281 | 0.7522 | | No log | 3.0 | 135 | 0.6973 | 0.4892 | 0.6637 | 0.5633 | 0.7757 | | No log | 4.0 | 180 | 0.6645 | 0.5209 | 0.7237 | 0.6058 | 0.7961 | | No log | 5.0 | 225 | 0.4692 | 0.6618 | 0.7041 | 0.6823 | 0.8644 | | No log | 6.0 | 270 | 0.4469 | 0.6902 | 0.7552 | 0.7213 | 0.8776 | | No log | 7.0 | 315 | 0.4761 | 0.6713 | 0.8123 | 0.7351 | 0.8745 | | No log | 8.0 | 360 | 0.3956 | 0.7524 | 0.8063 | 0.7784 | 0.9055 | | No log | 9.0 | 405 | 0.4272 | 0.7298 | 0.8332 | 0.7781 | 0.8976 | | No log | 10.0 | 450 | 0.4285 | 0.7520 | 0.8577 | 0.8014 | 0.9096 | | No log | 11.0 | 495 | 0.4022 | 0.7764 | 0.8693 | 0.8202 | 0.9147 | | 0.4557 | 12.0 | 540 | 0.3584 | 0.8090 | 0.8640 | 0.8356 | 0.9287 | | 0.4557 | 13.0 | 585 | 0.4022 | 0.8102 | 0.8733 | 0.8405 | 0.9253 | | 0.4557 | 14.0 | 630 | 0.4149 | 0.8067 | 0.8902 | 0.8464 | 0.9268 | | 0.4557 | 15.0 | 675 | 0.4160 | 0.8188 | 0.8919 | 0.8538 | 0.9290 | | 0.4557 | 16.0 | 720 | 0.4015 | 0.8173 | 0.8932 | 0.8536 | 0.9302 | | 0.4557 | 17.0 | 765 | 0.4084 | 0.8215 | 0.8945 | 0.8565 | 0.9309 | | 0.4557 | 18.0 | 810 | 0.4133 | 0.8219 | 0.8955 | 0.8571 | 0.9307 | | 0.4557 | 19.0 | 855 | 0.4131 | 0.8217 | 0.8955 | 0.8570 | 0.9310 | | 0.4557 | 20.0 | 900 | 0.4130 | 0.8219 | 0.8955 | 0.8571 | 0.9310 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
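The card above omits a usage snippet; a hedged inference sketch for the NER model it describes might look like the following. The repo id is a placeholder, and the entity label set is whatever the (unnamed) training dataset used.

```python
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="your-username/deberta-v3-large-finetuned-ner",  # hypothetical repo id
    aggregation_strategy="simple",  # merge sub-word pieces into whole entities
)

for ent in ner("Ada Lovelace worked with Charles Babbage in London."):
    print(ent["entity_group"], ent["word"], round(ent["score"], 3))
```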
BigSalmon/T52
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
8
2023-04-21T21:43:40Z
# Vocabulary Trimmed [vocabtrimmer/xlm-v-base-xnli-es](https://huggingface.co/vocabtrimmer/xlm-v-base-xnli-es): `vocabtrimmer/xlm-v-base-xnli-es-trimmed-es`

This model is a trimmed version of [vocabtrimmer/xlm-v-base-xnli-es](https://huggingface.co/vocabtrimmer/xlm-v-base-xnli-es) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming the vocabulary of language models to compress the model size.
The following table shows a summary of the trimming process.

|                            | vocabtrimmer/xlm-v-base-xnli-es | vocabtrimmer/xlm-v-base-xnli-es-trimmed-es |
|:---------------------------|:--------------------------------|:-------------------------------------------|
| parameter_size_full        | 778,495,491                     | 272,949,507                                |
| parameter_size_embedding   | 692,451,072                     | 186,905,088                                |
| vocab_size                 | 901,629                         | 243,366                                    |
| compression_rate_full      | 100.0                           | 35.06                                      |
| compression_rate_embedding | 100.0                           | 26.99                                      |

The following table shows the parameters used to trim the vocabulary.

| language | dataset                     | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency |
|:---------|:----------------------------|:---------------|:-------------|:--------------|:------------------|--------------:|
| es       | vocabtrimmer/mc4_validation | text           | es           | validation    |                   |             2 |
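Because the base model is an XNLI classifier, the trimmed model can be used for zero-shot classification in Spanish, as in the sketch below. This assumes the checkpoint keeps the usual NLI label mapping that the zero-shot pipeline expects.

```python
from transformers import pipeline

clf = pipeline(
    "zero-shot-classification",
    model="vocabtrimmer/xlm-v-base-xnli-es-trimmed-es",
)

print(clf(
    "El equipo ganó el partido en el último minuto.",
    candidate_labels=["deportes", "política", "economía"],
))
```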
BigSalmon/T5Salmon
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
6
2023-04-21T21:51:17Z
--- title: Forest Fire Detection emoji: 🔥 colorFrom: gray colorTo: red sdk: gradio sdk_version: 3.20.0 app_file: app.py pinned: true license: mit language: - en --- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
BigSalmon/T5Salmon2
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
13
2023-04-21T21:56:14Z
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for dpn68.mx_in1k A DPN (Dual-Path Net) image classification model. Trained on ImageNet-1k in MXNet by paper authors and ported to PyTorch by Ross Wightman. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 12.6 - GMACs: 2.4 - Activations (M): 10.5 - Image size: 224 x 224 - **Papers:** - Dual Path Networks: https://arxiv.org/abs/1707.01629 - **Dataset:** ImageNet-1k - **Original:** https://github.com/cypw/DPNs ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('dpn68.mx_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'dpn68.mx_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 10, 112, 112]) # torch.Size([1, 144, 56, 56]) # torch.Size([1, 320, 28, 28]) # torch.Size([1, 704, 14, 14]) # torch.Size([1, 832, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'dpn68.mx_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 832, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Citation ```bibtex @article{Chen2017, title={Dual Path Networks}, author={Yunpeng Chen, Jianan Li, Huaxin Xiao, Xiaojie Jin, Shuicheng Yan, Jiashi Feng}, journal={arXiv preprint arXiv:1707.01629}, year={2017} } ```
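The classification snippet above stops at raw class indices; a short follow-on (plain tensor ops, no further assumptions) shows how the top-5 result can be printed:

```python
# Continues from the Image Classification snippet above.
for prob, idx in zip(top5_probabilities[0], top5_class_indices[0]):
    print(f"ImageNet class {idx.item()}: {prob.item():.2f}%")
```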
BigSalmon/TS3
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible", "has_space" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2023-04-21T21:56:35Z
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for dpn68b.mx_in1k A DPN (Dual-Path Net) image classification model. Trained on ImageNet-1k in MXNet by paper authors and ported to PyTorch by Ross Wightman. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 12.6 - GMACs: 2.4 - Activations (M): 10.5 - Image size: 224 x 224 - **Papers:** - Dual Path Networks: https://arxiv.org/abs/1707.01629 - **Dataset:** ImageNet-1k - **Original:** https://github.com/cypw/DPNs ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('dpn68b.mx_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'dpn68b.mx_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 10, 112, 112]) # torch.Size([1, 144, 56, 56]) # torch.Size([1, 320, 28, 28]) # torch.Size([1, 704, 14, 14]) # torch.Size([1, 832, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'dpn68b.mx_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 832, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Citation ```bibtex @article{Chen2017, title={Dual Path Networks}, author={Yunpeng Chen, Jianan Li, Huaxin Xiao, Xiaojie Jin, Shuicheng Yan, Jiashi Feng}, journal={arXiv preprint arXiv:1707.01629}, year={2017} } ```
BigSalmon/prepositions
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for dpn68b.ra_in1k A DPN (Dual-Path Net) image classification model. Pretrained on ImageNet-1k in `timm` by Ross Wightman using RandAugment `RA` recipe. Related to `B` recipe in [ResNet Strikes Back](https://arxiv.org/abs/2110.00476). ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 12.6 - GMACs: 2.4 - Activations (M): 10.5 - Image size: train = 224 x 224, test = 288 x 288 - **Papers:** - Dual Path Networks: https://arxiv.org/abs/1707.01629 - ResNet strikes back: An improved training procedure in timm: https://arxiv.org/abs/2110.00476 - **Dataset:** ImageNet-1k - **Original:** https://github.com/huggingface/pytorch-image-models ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('dpn68b.ra_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'dpn68b.ra_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 10, 112, 112]) # torch.Size([1, 144, 56, 56]) # torch.Size([1, 320, 28, 28]) # torch.Size([1, 704, 14, 14]) # torch.Size([1, 832, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'dpn68b.ra_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 832, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Citation ```bibtex @article{Chen2017, title={Dual Path Networks}, author={Yunpeng Chen, Jianan Li, Huaxin Xiao, Xiaojie Jin, Shuicheng Yan, Jiashi Feng}, journal={arXiv preprint arXiv:1707.01629}, year={2017} } ``` ```bibtex @inproceedings{wightman2021resnet, title={ResNet strikes back: An improved training procedure in timm}, author={Wightman, Ross and Touvron, Hugo and Jegou, Herve}, booktitle={NeurIPS 2021 Workshop on ImageNet: Past, Present, and Future} } ```
BigTooth/DialoGPT-Megumin
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
null
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for dpn92.mx_in1k A DPN (Dual-Path Net) image classification model. Trained on ImageNet-1k in MXNet by paper authors and ported to PyTorch by Ross Wightman. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 37.7 - GMACs: 6.5 - Activations (M): 18.2 - Image size: 224 x 224 - **Papers:** - Dual Path Networks: https://arxiv.org/abs/1707.01629 - **Dataset:** ImageNet-1k - **Original:** https://github.com/cypw/DPNs ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('dpn92.mx_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'dpn92.mx_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 112, 112]) # torch.Size([1, 336, 56, 56]) # torch.Size([1, 704, 28, 28]) # torch.Size([1, 1552, 14, 14]) # torch.Size([1, 2688, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'dpn92.mx_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 2688, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Citation ```bibtex @article{Chen2017, title={Dual Path Networks}, author={Yunpeng Chen, Jianan Li, Huaxin Xiao, Xiaojie Jin, Shuicheng Yan, Jiashi Feng}, journal={arXiv preprint arXiv:1707.01629}, year={2017} } ```
BigTooth/DialoGPT-small-tohru
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
2023-04-21T21:57:45Z
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for dpn98.mx_in1k A DPN (Dual-Path Net) image classification model. Trained on ImageNet-1k in MXNet by paper authors and ported to PyTorch by Ross Wightman. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 61.6 - GMACs: 11.7 - Activations (M): 25.2 - Image size: 224 x 224 - **Papers:** - Dual Path Networks: https://arxiv.org/abs/1707.01629 - **Dataset:** ImageNet-1k - **Original:** https://github.com/cypw/DPNs ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('dpn98.mx_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'dpn98.mx_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 96, 112, 112]) # torch.Size([1, 336, 56, 56]) # torch.Size([1, 768, 28, 28]) # torch.Size([1, 1728, 14, 14]) # torch.Size([1, 2688, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'dpn98.mx_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 2688, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Citation ```bibtex @article{Chen2017, title={Dual Path Networks}, author={Yunpeng Chen, Jianan Li, Huaxin Xiao, Xiaojie Jin, Shuicheng Yan, Jiashi Feng}, journal={arXiv preprint arXiv:1707.01629}, year={2017} } ```
BigTooth/Megumin-v0.2
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for dpn107.mx_in1k A DPN (Dual-Path Net) image classification model. Trained on ImageNet-1k in MXNet by paper authors and ported to PyTorch by Ross Wightman. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 86.9 - GMACs: 18.4 - Activations (M): 33.5 - Image size: 224 x 224 - **Papers:** - Dual Path Networks: https://arxiv.org/abs/1707.01629 - **Dataset:** ImageNet-1k - **Original:** https://github.com/cypw/DPNs ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('dpn107.mx_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'dpn107.mx_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 128, 112, 112]) # torch.Size([1, 376, 56, 56]) # torch.Size([1, 1152, 28, 28]) # torch.Size([1, 2432, 14, 14]) # torch.Size([1, 2688, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'dpn107.mx_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 2688, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Citation ```bibtex @article{Chen2017, title={Dual Path Networks}, author={Yunpeng Chen, Jianan Li, Huaxin Xiao, Xiaojie Jin, Shuicheng Yan, Jiashi Feng}, journal={arXiv preprint arXiv:1707.01629}, year={2017} } ```
BinksSachary/DialoGPT-small-shaxx
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
# Vocabulary Trimmed [vocabtrimmer/xlm-v-base-xnli-ar](https://huggingface.co/vocabtrimmer/xlm-v-base-xnli-ar): `vocabtrimmer/xlm-v-base-xnli-ar-trimmed-ar` This model is a trimmed version of [vocabtrimmer/xlm-v-base-xnli-ar](https://huggingface.co/vocabtrimmer/xlm-v-base-xnli-ar) produced by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool that trims the vocabulary of a language model to compress its size. The following table summarizes the trimming process. | | vocabtrimmer/xlm-v-base-xnli-ar | vocabtrimmer/xlm-v-base-xnli-ar-trimmed-ar | |:---------------------------|:----------------------------------|:---------------------------------------------| | parameter_size_full | 778,495,491 | 157,462,275 | | parameter_size_embedding | 692,451,072 | 71,417,856 | | vocab_size | 901,629 | 92,992 | | compression_rate_full | 100.0 | 20.23 | | compression_rate_embedding | 100.0 | 10.31 | The following table shows the parameters used to trim the vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|:--------------------|----------------:| | ar | vocabtrimmer/mc4_validation | text | ar | validation | | 2 |
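A quick sketch for checking the compression figures in the table above, assuming the checkpoint loads through the standard `transformers` auto classes:

```python
from transformers import AutoModelForSequenceClassification

model = AutoModelForSequenceClassification.from_pretrained(
    "vocabtrimmer/xlm-v-base-xnli-ar-trimmed-ar"
)
# Should print roughly 157,462,275, matching parameter_size_full above.
print(sum(p.numel() for p in model.parameters()))
```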
Blerrrry/Kkk
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-21T22:30:40Z
--- language: - en license: creativeml-openrail-m thumbnail: "https://huggingface.co/Guizmus/SDArt_Encapsulated/resolve/main/showcase.jpg" tags: - stable-diffusion - text-to-image - image-to-image --- # SDArt : Encapsulated (version based on 1.5) ![Showcase](https://huggingface.co/Guizmus/SDArt_Encapsulated/resolve/main/showcase.jpg) ## Theme What if the world was in the palm of your hands? Condensed, contained, and captured within a simple sphere for all to see? * Create your own world encapsulated within an orb, sphere, container, etc. This can be any type of world or landscape you can imagine, but it must be confined within the boundaries of the orb. * Bring your miniature world to life. Big things come in small packages! * A world made of crystals and moss? A lush forest landscape? An upside-down world? A world made of instruments? A world made of tangled wires? Be creative! Be uniquely you! ## Model description This is a model related to the "Picture of the Week" contest on [Stable Diffusion discord](https://discord.gg/stablediffusion). I try to make a model out of all the submissions so that people can continue enjoying the theme after the event, and see a little of their designs in other people's creations. The token stays "SDArt" and I balance the learning on the low side, so that it doesn't just replicate the original creations. The total dataset is made of 36 pictures. It was trained on [Stable Diffusion 1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5). I used [EveryDream](https://github.com/victorchall/EveryDream2trainer) to do the training, 100 total repeats per picture. The pictures were tagged using the token "SDArt", and an arbitrary token I chose. The dataset is provided below, as well as a list of usernames and their corresponding tokens. The recommended sampling is k_Euler_a or DPM++ 2M Karras at 20 steps, CFG scale 7.5. [The model is also available here](https://huggingface.co/Guizmus/SDArt_Encapsulated768) in a version trained on 2.1 as a base. ## Trained tokens * SDArt * bnp * aten * fcu * cous * aved * arum * omd * kuro * asot * psst * buon * utm * vaw * mss * guin * mgt * crit * isch * phol * vedi * dds * acu * pte * oxi * rean * reba * reem * revs * rith * rmb * rolf * ront * rps * rsc * gare * shld ## Download links [SafeTensors](https://huggingface.co/Guizmus/SDArt_Encapsulated/resolve/main/SDArt_Encapsulated.safetensors) [Dataset](https://huggingface.co/Guizmus/SDArt_Encapsulated/resolve/main/dataset.zip) ## 🧨 Diffusers This model can be used just like any other Stable Diffusion model. For more information, please have a look at the [Stable Diffusion documentation](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion). You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX](). ```python from diffusers import StableDiffusionPipeline import torch model_id = "Guizmus/SDArt_Encapsulated" pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16) pipe = pipe.to("cuda") prompt = "SDArt vedi" image = pipe(prompt).images[0] image.save("./SDArt.png") ```
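The card recommends k_Euler_a or DPM++ 2M Karras at 20 steps with CFG scale 7.5, while the snippet above uses the pipeline defaults; a sketch of applying the recommended settings in diffusers (mapping k_Euler_a to diffusers' Euler ancestral scheduler is an assumption based on naming):

```python
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler
import torch

pipe = StableDiffusionPipeline.from_pretrained(
    "Guizmus/SDArt_Encapsulated", torch_dtype=torch.float16
).to("cuda")
# k_Euler_a corresponds to the Euler ancestral scheduler in diffusers.
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

image = pipe("SDArt vedi", num_inference_steps=20, guidance_scale=7.5).images[0]
image.save("./SDArt.png")
```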
BlightZz/DialoGPT-medium-Kurisu
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
19
null
--- language: - en license: creativeml-openrail-m thumbnail: "https://huggingface.co/Guizmus/SDArt_Encapsulated768/resolve/main/showcase.jpg" tags: - stable-diffusion - text-to-image - image-to-image --- # SDArt : Encapsulated (version based on 2.1 768px) ![Showcase](https://huggingface.co/Guizmus/SDArt_Encapsulated768/resolve/main/showcase.jpg) ## Theme What if the world was in the palm of your hands? Condensed, contained, and captured within a simple sphere for all to see? * Create your own world encapsulated within an orb, sphere, container, etc. This can be any type of world or landscape you can imagine, but it must be confined within the boundaries of the orb. * Bring your miniature world to life. Big things come in small packages! * A world made of crystals and moss? A lush forest landscape? An upside-down world? A world made of instruments? A world made of tangled wires? Be creative! Be uniquely you! ## Model description This is a model related to the "Picture of the Week" contest on [Stable Diffusion discord](https://discord.gg/stablediffusion). I try to make a model out of all the submissions so that people can continue enjoying the theme after the event, and see a little of their designs in other people's creations. The token stays "SDArt" and I balance the learning on the low side, so that it doesn't just replicate the original creations. The total dataset is made of 36 pictures. It was trained on [Stable Diffusion 2.1 768px](https://huggingface.co/stabilityai/stable-diffusion-2-1). I used [EveryDream](https://github.com/victorchall/EveryDream2trainer) to do the training, 100 total repeats per picture. The pictures were tagged using the token "SDArt", and an arbitrary token I chose. The dataset is provided below, as well as a list of usernames and their corresponding tokens. The recommended sampling is k_Euler_a or DPM++ 2M Karras at 20 steps, CFG scale 7.5. [The model is also available here](https://huggingface.co/Guizmus/SDArt_Encapsulated) in a version trained on 1.5 as a base. ## Trained tokens * SDArt * bnp * aten * fcu * cous * aved * arum * omd * kuro * asot * psst * buon * utm * vaw * mss * guin * mgt * crit * isch * phol * vedi * dds * acu * pte * oxi * rean * reba * reem * revs * rith * rmb * rolf * ront * rps * rsc * gare * shld ## Download links [SafeTensors](https://huggingface.co/Guizmus/SDArt_Encapsulated768/resolve/main/SDArt_Encapsulated768.safetensors) [Config (yaml)](https://huggingface.co/Guizmus/SDArt_Encapsulated768/resolve/main/SDArt_Encapsulated768.yaml) [Dataset](https://huggingface.co/Guizmus/SDArt_Encapsulated768/resolve/main/dataset.zip) ## 🧨 Diffusers This model can be used just like any other Stable Diffusion model. For more information, please have a look at the [Stable Diffusion documentation](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion). You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX](). ```python from diffusers import StableDiffusionPipeline import torch model_id = "Guizmus/SDArt_Encapsulated768" pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16) pipe = pipe.to("cuda") prompt = "SDArt kuro" image = pipe(prompt).images[0] image.save("./SDArt.png") ```
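As with the 1.5 version, a sketch of applying the card's recommended DPM++ 2M Karras sampler in diffusers (the scheduler mapping and the `use_karras_sigmas` override are assumptions based on diffusers' naming):

```python
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
import torch

pipe = StableDiffusionPipeline.from_pretrained(
    "Guizmus/SDArt_Encapsulated768", torch_dtype=torch.float16
).to("cuda")
# DPM++ 2M Karras maps to the multistep DPM-Solver with Karras sigmas.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config, use_karras_sigmas=True
)

image = pipe("SDArt kuro", num_inference_steps=20, guidance_scale=7.5).images[0]
image.save("./SDArt.png")
```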
BobBraico/bert-finetuned-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: keyword_category_classifier_v6 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # keyword_category_classifier_v6 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.2283 - Accuracy: 0.9331 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.3075 | 1.0 | 1688 | 0.2638 | 0.9169 | | 0.2008 | 2.0 | 3376 | 0.2283 | 0.9331 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
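The trainer-generated card above gives no inference example; a minimal sketch, assuming the checkpoint is published under a standard repo id (the id below is hypothetical and must be replaced with the real one):

```python
from transformers import pipeline

# Hypothetical repo id -- substitute the actual checkpoint location.
clf = pipeline("text-classification", model="your-org/keyword_category_classifier_v6")
print(clf("wireless noise cancelling headphones"))
```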
BobBraico/distilbert-base-uncased-finetuned-imdb
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for densenet121.ra_in1k A DenseNet image classification model. Pretrained on ImageNet-1k in `timm` by Ross Wightman using RandAugment `RA` recipe. Related to `B` recipe in [ResNet Strikes Back](https://arxiv.org/abs/2110.00476). ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 8.0 - GMACs: 2.9 - Activations (M): 6.9 - Image size: train = 224 x 224, test = 288 x 288 - **Papers:** - Densely Connected Convolutional Networks: https://arxiv.org/abs/1608.06993 - ResNet strikes back: An improved training procedure in timm: https://arxiv.org/abs/2110.00476 - **Dataset:** ImageNet-1k - **Original:** https://github.com/huggingface/pytorch-image-models ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('densenet121.ra_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'densenet121.ra_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 1024, 14, 14]) # torch.Size([1, 1024, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'densenet121.ra_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1024, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Citation ```bibtex @inproceedings{huang2017densely, title={Densely Connected Convolutional Networks}, author={Huang, Gao and Liu, Zhuang and van der Maaten, Laurens and Weinberger, Kilian Q }, booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, year={2017} } ``` ```bibtex @inproceedings{wightman2021resnet, title={ResNet strikes back: An improved training procedure in timm}, author={Wightman, Ross and Touvron, Hugo and Jegou, Herve}, booktitle={NeurIPS 2021 Workshop on ImageNet: Past, Present, and Future} } ```
BonjinKim/dst_kor_bert
[ "pytorch", "jax", "bert", "pretraining", "transformers" ]
null
{ "architectures": [ "BertForPreTraining" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2023-04-21T22:53:48Z
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for densenet121.tv_in1k A DenseNet image classification model. Trained on ImageNet-1k (original torchvision weights). ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 8.0 - GMACs: 2.9 - Activations (M): 6.9 - Image size: 224 x 224 - **Papers:** - Densely Connected Convolutional Networks: https://arxiv.org/abs/1608.06993 - **Dataset:** ImageNet-1k - **Original:** https://github.com/pytorch/vision ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('densenet121.tv_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'densenet121.tv_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 1024, 14, 14]) # torch.Size([1, 1024, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'densenet121.tv_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1024, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Citation ```bibtex @inproceedings{huang2017densely, title={Densely Connected Convolutional Networks}, author={Huang, Gao and Liu, Zhuang and van der Maaten, Laurens and Weinberger, Kilian Q }, booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, year={2017} } ```
Boondong/Wandee
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for densenet161.tv_in1k A DenseNet image classification model. Trained on ImageNet-1k (original torchvision weights). ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 28.7 - GMACs: 7.8 - Activations (M): 11.1 - Image size: 224 x 224 - **Papers:** - Densely Connected Convolutional Networks: https://arxiv.org/abs/1608.06993 - **Dataset:** ImageNet-1k - **Original:** https://github.com/pytorch/vision ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('densenet161.tv_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'densenet161.tv_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 96, 112, 112]) # torch.Size([1, 384, 56, 56]) # torch.Size([1, 768, 28, 28]) # torch.Size([1, 2112, 14, 14]) # torch.Size([1, 2208, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'densenet161.tv_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 2208, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Citation ```bibtex @inproceedings{huang2017densely, title={Densely Connected Convolutional Networks}, author={Huang, Gao and Liu, Zhuang and van der Maaten, Laurens and Weinberger, Kilian Q }, booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, year={2017} } ```
BossLee/t5-gec
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
6
null
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for densenet169.tv_in1k A DenseNet image classification model. Trained on ImageNet-1k (original torchvision weights). ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 14.1 - GMACs: 3.4 - Activations (M): 7.3 - Image size: 224 x 224 - **Papers:** - Densely Connected Convolutional Networks: https://arxiv.org/abs/1608.06993 - **Dataset:** ImageNet-1k - **Original:** https://github.com/pytorch/vision ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('densenet169.tv_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'densenet169.tv_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 1280, 14, 14]) # torch.Size([1, 1664, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'densenet169.tv_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1664, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Citation ```bibtex @inproceedings{huang2017densely, title={Densely Connected Convolutional Networks}, author={Huang, Gao and Liu, Zhuang and van der Maaten, Laurens and Weinberger, Kilian Q }, booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, year={2017} } ```
Botjallu/DialoGPT-small-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for densenet201.tv_in1k A DenseNet image classification model. Trained on ImageNet-1k (original torchvision weights). ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 20.0 - GMACs: 4.3 - Activations (M): 7.9 - Image size: 224 x 224 - **Papers:** - Densely Connected Convolutional Networks: https://arxiv.org/abs/1608.06993 - **Dataset:** ImageNet-1k - **Original:** https://github.com/pytorch/vision ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('densenet201.tv_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'densenet201.tv_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 1792, 14, 14]) # torch.Size([1, 1920, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'densenet201.tv_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1920, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Citation ```bibtex @inproceedings{huang2017densely, title={Densely Connected Convolutional Networks}, author={Huang, Gao and Liu, Zhuang and van der Maaten, Laurens and Weinberger, Kilian Q }, booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, year={2017} } ```
Brayan/CNN_Brain_Tumor
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-21T23:11:53Z
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---
# Model card for ese_vovnet19b_dw.ra_in1k

A VoVNet-v2 image classification model. Pretrained on ImageNet-1k in `timm` by Ross Wightman using RandAugment `RA` recipe. Related to `B` recipe in [ResNet Strikes Back](https://arxiv.org/abs/2110.00476).

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 6.5
  - GMACs: 1.3
  - Activations (M): 8.2
  - Image size: train = 224 x 224, test = 288 x 288
- **Papers:**
  - An Energy and GPU-Computation Efficient Backbone Network: https://arxiv.org/abs/1904.09730
  - CenterMask: Real-Time Anchor-Free Instance Segmentation: https://arxiv.org/abs/1911.06667
  - ResNet strikes back: An improved training procedure in timm: https://arxiv.org/abs/2110.00476
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/huggingface/pytorch-image-models

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('ese_vovnet19b_dw.ra_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'ese_vovnet19b_dw.ra_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 64, 112, 112])
    #  torch.Size([1, 256, 56, 56])
    #  torch.Size([1, 512, 28, 28])
    #  torch.Size([1, 768, 14, 14])
    #  torch.Size([1, 1024, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'ese_vovnet19b_dw.ra_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 1024, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Citation
```bibtex
@inproceedings{lee2019energy,
  title={An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection},
  author={Lee, Youngwan and Hwang, Joong-won and Lee, Sangrok and Bae, Yuseok and Park, Jongyoul},
  booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops},
  year={2019}
}
```
```bibtex
@inproceedings{lee2019centermask,
  title={CenterMask: Real-Time Anchor-Free Instance Segmentation},
  author={Lee, Youngwan and Park, Jongyoul},
  booktitle={CVPR},
  year={2020}
}
```
```bibtex
@inproceedings{wightman2021resnet,
  title={ResNet strikes back: An improved training procedure in timm},
  author={Wightman, Ross and Touvron, Hugo and Jegou, Herve},
  booktitle={NeurIPS 2021 Workshop on ImageNet: Past, Present, and Future}
}
```
BrianTin/MTBERT
[ "pytorch", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
2023-04-21T23:14:16Z
---
tags:
- generated_from_trainer
model-index:
- name: t5-MCQ-question-generator_v1
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# t5-MCQ-question-generator_v1

This model is a fine-tuned version of [Bilkies/t5-MCQ-question-generator](https://huggingface.co/Bilkies/t5-MCQ-question-generator) on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.001
- train_batch_size: 8
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4

### Training results

### Framework versions

- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Datasets 2.11.0
- Tokenizers 0.13.3
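The card gives no usage snippet, so here is a minimal inference sketch with the base checkpoint referenced above. The input format is an assumption about how the model was trained (a context passage plus the answer the generated question should target); adjust it to the actual training prompt.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Base checkpoint from the card; the fine-tuned "_v1" hub path is not stated.
tokenizer = AutoTokenizer.from_pretrained("Bilkies/t5-MCQ-question-generator")
model = AutoModelForSeq2SeqLM.from_pretrained("Bilkies/t5-MCQ-question-generator")

# Assumed input format: context + target answer
text = "context: The mitochondria is the powerhouse of the cell. answer: mitochondria"
inputs = tokenizer(text, return_tensors="pt", truncation=True)
output_ids = model.generate(**inputs, max_length=64, num_beams=4)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```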
Brona/model1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
datasets:
- HuggingFaceM4/vatex
language:
- en
metrics:
- bleu
- meteor
- rouge
pipeline_tag: text-generation
inference: false
tags:
- video-captioning
---

# TimeSformer-GPT2 Video Captioning

Vision Encoder Model: [timesformer-base-finetuned-k600](https://huggingface.co/facebook/timesformer-base-finetuned-k600) \
Text Decoder Model: [gpt2](https://huggingface.co/gpt2)

#### Evaluation Result:
67.2 CIDEr on [VaTeX](https://eric-xw.github.io/vatex-website/index.html) public test set

#### Example Inference Code:
```python
import av
import numpy as np
import torch
from transformers import AutoImageProcessor, AutoTokenizer, VisionEncoderDecoderModel

device = "cuda" if torch.cuda.is_available() else "cpu"

# load pretrained processor, tokenizer, and model
image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = VisionEncoderDecoderModel.from_pretrained("Neleac/timesformer-gpt2-video-captioning").to(device)

# load video
video_path = "never_gonna_give_you_up.mp4"
container = av.open(video_path)

# extract evenly spaced frames from video
seg_len = container.streams.video[0].frames
clip_len = model.config.encoder.num_frames
indices = set(np.linspace(0, seg_len, num=clip_len, endpoint=False).astype(np.int64))
frames = []
container.seek(0)
for i, frame in enumerate(container.decode(video=0)):
    if i in indices:
        frames.append(frame.to_ndarray(format="rgb24"))

# generate caption
gen_kwargs = {
    "min_length": 10,
    "max_length": 20,
    "num_beams": 8,
}
pixel_values = image_processor(frames, return_tensors="pt").pixel_values.to(device)
tokens = model.generate(pixel_values, **gen_kwargs)
caption = tokenizer.batch_decode(tokens, skip_special_tokens=True)[0]
print(caption)  # A man and a woman are dancing on a stage in front of a mirror.
```

#### Author Information:

👾 [Discord](https://discordapp.com/users/297770280863137802) \
🐙 [GitHub](https://github.com/Neleac) \
🤝 [LinkedIn](https://www.linkedin.com/in/caelenw/)
Brykee/BrykeeBot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: FrozenLake
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
model = load_from_hub(repo_id="tooucci/FrozenLake", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
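The snippet above assumes a `load_from_hub` helper and a `gym` import. A minimal sketch of that helper, following the pickle-based pattern used in the Hugging Face Deep RL course (the shape of the pickled dict, e.g. the `env_id` key, is an assumption about this particular file):

```python
import pickle

import gym
from huggingface_hub import hf_hub_download


def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download a pickled Q-learning model from the Hub and unpickle it."""
    pickle_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(pickle_path, "rb") as f:
        return pickle.load(f)


model = load_from_hub(repo_id="tooucci/FrozenLake", filename="q-learning.pkl")
env = gym.make(model["env_id"], is_slippery=False)
```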
Bryson575x/riceboi
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---
# Model card for xception41.tf_in1k

An Aligned Xception image classification model. Trained on ImageNet-1k in Tensorflow and ported to PyTorch by Ross Wightman.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 27.0
  - GMACs: 9.3
  - Activations (M): 39.9
  - Image size: 299 x 299
- **Papers:**
  - Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation: https://arxiv.org/abs/1802.02611
  - Xception: Deep Learning with Depthwise Separable Convolutions: https://arxiv.org/abs/1610.02357
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/tensorflow/models/blob/master/research/deeplab/

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('xception41.tf_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'xception41.tf_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 128, 150, 150])
    #  torch.Size([1, 256, 75, 75])
    #  torch.Size([1, 728, 38, 38])
    #  torch.Size([1, 1024, 19, 19])
    #  torch.Size([1, 2048, 10, 10])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'xception41.tf_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 2048, 10, 10) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Citation
```bibtex
@inproceedings{deeplabv3plus2018,
  title={Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation},
  author={Liang-Chieh Chen and Yukun Zhu and George Papandreou and Florian Schroff and Hartwig Adam},
  booktitle={ECCV},
  year={2018}
}
```
```bibtex
@misc{chollet2017xception,
  title={Xception: Deep Learning with Depthwise Separable Convolutions},
  author={François Chollet},
  year={2017},
  eprint={1610.02357},
  archivePrefix={arXiv},
  primaryClass={cs.CV}
}
```
BumBelDumBel/TRUMP
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "generated_from_trainer", "license:mit" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2023-04-21T23:43:33Z
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---
# Model card for xception65.ra3_in1k

An Aligned Xception image classification model. Pretrained on ImageNet-1k in `timm` by Ross Wightman using RandAugment `RA3` recipe. Related to `B` recipe in [ResNet Strikes Back](https://arxiv.org/abs/2110.00476).

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 39.9
  - GMACs: 14.0
  - Activations (M): 52.5
  - Image size: 299 x 299
- **Papers:**
  - Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation: https://arxiv.org/abs/1802.02611
  - Xception: Deep Learning with Depthwise Separable Convolutions: https://arxiv.org/abs/1610.02357
  - ResNet strikes back: An improved training procedure in timm: https://arxiv.org/abs/2110.00476
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/huggingface/pytorch-image-models

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('xception65.ra3_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'xception65.ra3_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 128, 150, 150])
    #  torch.Size([1, 256, 75, 75])
    #  torch.Size([1, 728, 38, 38])
    #  torch.Size([1, 1024, 19, 19])
    #  torch.Size([1, 2048, 10, 10])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'xception65.ra3_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 2048, 10, 10) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Citation
```bibtex
@inproceedings{deeplabv3plus2018,
  title={Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation},
  author={Liang-Chieh Chen and Yukun Zhu and George Papandreou and Florian Schroff and Hartwig Adam},
  booktitle={ECCV},
  year={2018}
}
```
```bibtex
@misc{chollet2017xception,
  title={Xception: Deep Learning with Depthwise Separable Convolutions},
  author={François Chollet},
  year={2017},
  eprint={1610.02357},
  archivePrefix={arXiv},
  primaryClass={cs.CV}
}
```
```bibtex
@inproceedings{wightman2021resnet,
  title={ResNet strikes back: An improved training procedure in timm},
  author={Wightman, Ross and Touvron, Hugo and Jegou, Herve},
  booktitle={NeurIPS 2021 Workshop on ImageNet: Past, Present, and Future}
}
```
CAMeL-Lab/bert-base-arabic-camelbert-mix-poetry
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:1905.05700", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
Access to model BaherElnaggar/autotrain-arabic-sentiment-analysis-51469121981 is restricted and you are not in the authorized list. Visit https://huggingface.co/BaherElnaggar/autotrain-arabic-sentiment-analysis-51469121981 to ask for access.
CLTL/icf-levels-adm
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
---
license: cc-by-4.0
datasets:
- yahma/alpaca-cleaned
- EleutherAI/the_pile
language:
- en
pipeline_tag: text-generation
tags:
- alpaca
- peft
- gpt
- lora
- llm
---

# Model Card

We provide a low-rank adapter for an instruction-tuned 6.7B-parameter GPT3-style language model.

Example instruction:
```
Write a list of things I need to do to plan a watch party for Game 4 of the Knicks-Cavs playoff series.
```

Corresponding response from the model:
```
1. Find out where the game will be played and purchase tickets.
2. Decide on food, drinks, decorations, and entertainment.
3. Plan the route to get there.
4. Arrange transportation.
5. Organize games and activities for guests.
6. Make sure everyone has their tickets.
7. Set up the venue.
8. Prepare for the event.
9. Clean up after the event.
10. Celebrate with friends and family.
```

## Quickstart

Setting up the model using `transformers` and `peft`:

```python
from peft import PeftModel
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("cerebras/Cerebras-GPT-6.7B")
tokenizer.pad_token_id = 0

model = AutoModelForCausalLM.from_pretrained(
    "cerebras/Cerebras-GPT-6.7B",
    load_in_8bit=True,
    device_map="auto",
)
model = PeftModel.from_pretrained(
    model,
    "lucasmccabe-lmi/GPT-6.7B-CleanedAlpaca-1"
)
```

## Prompting

For instructions that **do not** require extraneous inputs, the recommended prompt is:

```
Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
<-- instruction goes here -->

### Response:
```

For instructions that **do** require extraneous inputs, the recommended prompt is:

```
Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
<-- instruction goes here -->

### Input:
<-- extraneous input goes here -->

### Response:
```

Since the model performs [causal language modeling](https://huggingface.co/docs/transformers/tasks/language_modeling), its response to the prompt is the text that completes the sequence beginning with the prompt.

## Instruction-Tuning

This model was instruction-tuned on [a cleaned version of the Stanford Alpaca dataset](https://github.com/gururise/AlpacaDataCleaned), consisting of 52k post-processed instruction-input-output triplets derived from OpenAI's `text-davinci-003`.

- **Epochs**: 3
- **Batch size**: 128
- **Cutoff length**: 512
- **Learning rate**: 2e-5
- **LoRA _r_**: 4
- **LoRA _alpha_**: 16
- **LoRA _dropout_**: 0.05
- **LoRA target modules**: `c_attn`
- **Dataset**: [yahma/alpaca-cleaned](https://huggingface.co/datasets/yahma/alpaca-cleaned)
- **License**: The instruction-tuning data is subject to the [Creative Commons 4.0](https://creativecommons.org/licenses/by/4.0/) license.

## Base Model

This model was instruction-tuned from a 6.7B variant of the Cerebras-GPT family. These models were pre-trained to the ["Chinchilla-optimal"](https://arxiv.org/abs/2203.15556) budget of 20 tokens per parameter (20 × 6.7B tokens) from [EleutherAI/The Pile](https://huggingface.co/datasets/EleutherAI/the_pile).

- **Repository:** [cerebras/Cerebras-GPT-6.7B](https://huggingface.co/cerebras/Cerebras-GPT-6.7B)
- **Paper:** [arxiv:2304.03208](https://arxiv.org/abs/2304.03208)
- **License**: The base model is subject to the Apache 2.0 license.
- **Model type**: Transformer-based Language Model

## Software

We used [LMI's](https://huggingface.co/lmiconsulting) internal `liger` library, which is built on `PyTorch` and the excellent Hugging Face stack (`transformers`, `accelerate`, etc.).

## Licensing Information

We release this adapter under the [Creative Commons NonCommercial (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/) license.

## Author

- [lucasmccabe-lmi](https://lucasmccabe.github.io/)
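Putting the quickstart and the recommended prompt together, a minimal end-to-end generation sketch (the instruction and generation settings are illustrative, not tuned values):

```python
import torch

# `model` and `tokenizer` come from the Quickstart section above
prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nWrite a haiku about basketball.\n\n"
    "### Response:\n"
)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=128)

# keep only the completion after the prompt
print(tokenizer.decode(output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```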
CSZay/bart
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- emotion
metrics:
- accuracy
- f1
- precision
model-index:
- name: distilbert-base-uncased_emotion_ft_0416
  results:
  - task:
      name: Text Classification
      type: text-classification
    dataset:
      name: emotion
      type: emotion
      config: split
      split: validation
      args: split
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.935
    - name: F1
      type: f1
      value: 0.9351297545369408
    - name: Precision
      type: precision
      value: 0.9086804558548226
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased_emotion_ft_0416

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1489
- Accuracy: 0.935
- F1: 0.9351
- Precision: 0.9087

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1     | Precision |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------:|
| 0.7803        | 1.0   | 250  | 0.2696          | 0.916    | 0.9141 | 0.9056    |
| 0.2098        | 2.0   | 500  | 0.1888          | 0.9275   | 0.9278 | 0.8974    |
| 0.1392        | 3.0   | 750  | 0.1546          | 0.932    | 0.9324 | 0.9034    |
| 0.1084        | 4.0   | 1000 | 0.1489          | 0.935    | 0.9351 | 0.9087    |

### Framework versions

- Transformers 4.28.1
- Pytorch 1.13.1+cu117
- Datasets 2.11.0
- Tokenizers 0.13.3
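The card gives no usage snippet. A minimal inference sketch with the `transformers` pipeline; the card does not state the hub path of this checkpoint, so the repo id below is a placeholder:

```python
from transformers import pipeline

# Placeholder repo id; substitute the actual hub path of this fine-tuned checkpoint.
classifier = pipeline("text-classification", model="<user>/distilbert-base-uncased_emotion_ft_0416")

print(classifier("I can't believe how well this turned out!"))
# e.g. [{'label': 'joy', 'score': 0.99}]
```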
Canyonevo/DialoGPT-medium-KingHenry
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-22T04:06:05Z
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
model = load_from_hub(repo_id="innovation64/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
Capreolus/bert-base-msmarco
[ "pytorch", "tf", "jax", "bert", "text-classification", "arxiv:2008.09093", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
238
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
model = load_from_hub(repo_id="innovation64/Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
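Once loaded, the pickled dict can drive a greedy rollout. This is a sketch: the `qtable` key follows the convention of the Deep RL course pickles and is an assumption about this particular file, and the code assumes the Gym >= 0.26 API.

```python
import gym
import numpy as np

env = gym.make(model["env_id"])

# Gym >= 0.26 API: reset returns (obs, info), step returns a 5-tuple
state, info = env.reset()
done = False
total_reward = 0

while not done:
    # act greedily with respect to the learned Q-table
    action = int(np.argmax(model["qtable"][state]))
    state, reward, terminated, truncated, info = env.step(action)
    total_reward += reward
    done = terminated or truncated

print(f"Episode return: {total_reward}")
```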
Captain272/lstm
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
library_name: ml-agents
tags:
- SnowballTarget
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-SnowballTarget
---

# **ppo** Agent playing **SnowballTarget**

This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget
2. Find your model_id: sumitk/ppo-SnowballTarget-2
3. Select your *.nn / *.onnx file
4. Click on Watch the agent play 👀
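For example, resuming this run might look like the following; the config path and run id are illustrative assumptions, not values recorded in the card, so substitute the ones from your own training setup:

```
mlagents-learn ./config/ppo/SnowballTarget.yaml --run-id=SnowballTarget1 --resume
```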
dccuchile/albert-large-spanish-finetuned-ner
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
---
license: creativeml-openrail-m
tags:
- text-to-image
- stable-diffusion
---

### kpop-lsa-2500 Dreambooth model trained by Thuong with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook

Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)

Sample pictures of this concept:
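A minimal inference sketch with `diffusers`; both the repo id and the trigger prompt below are assumptions inferred from the card (check the repo files for the exact instance token):

```python
import torch
from diffusers import StableDiffusionPipeline

# Repo id is an assumption based on the card; adjust to the actual hub path.
pipe = StableDiffusionPipeline.from_pretrained(
    "Thuong/kpop-lsa-2500", torch_dtype=torch.float16
).to("cuda")

# "kpop-lsa-2500" is assumed to be the Dreambooth instance token.
image = pipe("a portrait photo of kpop-lsa-2500", num_inference_steps=30).images[0]
image.save("sample.png")
```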
dccuchile/albert-large-spanish-finetuned-pos
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
---
library_name: ml-agents
tags:
- Huggy
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Huggy
---

# **ppo** Agent playing **Huggy**

This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy
2. Find your model_id: VcRlAgent/ppo-Huggy
3. Select your *.nn / *.onnx file
4. Click on Watch the agent play 👀
dccuchile/albert-large-spanish-finetuned-xnli
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
---
language:
- en
tags:
- causal-lm
- code-generation
- code-completion
license:
- cc-by-nc-sa-4.0
datasets:
- ehartford/leet10k-alpaca
- Dampish/MPTE_dante
---

# STABLEKODA-3B Low-Rank Adaptation

## Model Description

`STABLEKODA-3B` is a 3B-parameter decoder-only language model built on top of the `StableLM-Base-Alpha` models and further fine-tuned for code generation and code completion tasks.

## Model Details

- **Developed by**: [Stability AI](https://stability.ai/)
- **Model type**: STABLEKODA-3B is an auto-regressive LoRA adapter for a language model based on the NeoX transformer architecture, fine-tuned for programming and code.

## Training

- Parameters: 3B + 32 LoRA RParams
- Hidden Size: 16 (0.8 LoRA Alpha)
- Layers: 32
- Heads: 32
- Sequence Length: 4096
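A loading sketch for an adapter like this, assuming the public `StableLM-Base-Alpha` 3B base model; the adapter repo id is hypothetical, since the card does not state the hub path:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "stabilityai/stablelm-base-alpha-3b"   # assumed base checkpoint
adapter_id = "<user>/STABLEKODA-3B"              # hypothetical; replace with the real adapter repo

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16, device_map="auto")
model = PeftModel.from_pretrained(model, adapter_id)

# simple code-completion prompt
prompt = "def fibonacci(n):"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```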
dccuchile/albert-tiny-spanish-finetuned-qa-mlqa
[ "pytorch", "albert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "AlbertForQuestionAnswering" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
model = load_from_hub(repo_id="Hariprasath28/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```