modelId
stringlengths
4
81
tags
list
pipeline_tag
stringclasses
17 values
config
dict
downloads
int64
0
59.7M
first_commit
timestamp[ns, tz=UTC]
card
stringlengths
51
438k
Czapla/Rick
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-28T22:58:55Z
--- license: openrail url: https://play.google.com/store/apps/details?id=com.hussle.skimpex_app&gl=DE ---
D3xter1922/electra-base-discriminator-finetuned-mnli
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-28T23:14:21Z
--- language: en tags: - sagemaker - bart - summarization license: apache-2.0 datasets: - tomasg25/scientific_lay_summarisation model-index: - name: bart-large-tomasg25/scientific_lay_summarisation results: - task: name: Abstractive Text Summarization type: abstractive-text-summarization dataset: name: "tomasg25/scientific_lay_summarisation" type: plos metrics: - name: Validation ROUGE-1 type: rouge-1 value: 42.621 - name: Validation ROUGE-2 type: rouge-2 value: 21.9825 - name: Validation ROUGE-L type: rouge-l value: 33.034 - name: Test ROUGE-1 type: rouge-1 value: 41.3174 - name: Test ROUGE-2 type: rouge-2 value: 20.8716 - name: Test ROUGE-L type: rouge-l value: 32.1337 widget: --- ## `bart-large-tomasg25/scientific_lay_summarisation` This model was trained using Amazon SageMaker and the new Hugging Face Deep Learning container. For more information look at: - [🀗 Transformers Documentation: Amazon SageMaker](https://huggingface.co/transformers/sagemaker.html) - [Example Notebooks](https://github.com/huggingface/notebooks/tree/master/sagemaker) - [Amazon SageMaker documentation for Hugging Face](https://docs.aws.amazon.com/sagemaker/latest/dg/hugging-face.html) - [Python SDK SageMaker documentation for Hugging Face](https://sagemaker.readthedocs.io/en/stable/frameworks/huggingface/index.html) - [Deep Learning Container](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers) ## Hyperparameters { "cache_dir": "opt/ml/input", "dataset_config_name": "plos", "dataset_name": "tomasg25/scientific_lay_summarisation", "do_eval": true, "do_predict": true, "do_train": true, "fp16": true, "learning_rate": 5e-05, "model_name_or_path": "facebook/bart-large", "num_train_epochs": 1, "output_dir": "/opt/ml/model", "per_device_eval_batch_size": 4, "per_device_train_batch_size": 4, "predict_with_generate": true, "seed": 7 } ## Usage from transformers import pipeline summarizer = pipeline("summarization", model="sambydlo/bart-large-tomasg25/scientific_lay_summarisation") article = "Food production is a major driver of greenhouse gas (GHG) emissions, water and land use, and dietary risk factors are contributors to non-communicable diseases. Shifts in dietary patterns can therefore potentially provide benefits for both the environment and health. However, there is uncertainty about the magnitude of these impacts, and the dietary changes necessary to achieve them. We systematically review the evidence on changes in GHG emissions, land use, and water use, from shifting current dietary intakes to environmentally sustainable dietary patterns. We find 14 common sustainable dietary patterns across reviewed studies, with reductions as high as 70–80% of GHG emissions and land use, and 50% of water use (with medians of about 20–30% for these indicators across all studies) possible by adopting sustainable dietary patterns. Reductions in environmental footprints were generally proportional to the magnitude of animal-based food restriction. Dietary shifts also yielded modest benefits in all-cause mortality risk. Our review reveals that environmental and health benefits are possible by shifting current Western diets to a variety of more sustainable dietary patterns." summarizer(article) ## Results | key | value | | --- | ----- | | eval_rouge1 | 41.3889 | | eval_rouge2 | 13.3641 | | eval_rougeL | 24.3154 | | eval_rougeLsum | 36.612 | | test_rouge1 | 41.4786 | | test_rouge2 | 13.3787 | | test_rougeL | 24.1558 | | test_rougeLsum | 36.7723 |
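For reference, the flattened usage snippet in the card above can be run as the following sketch. The model id is copied verbatim from the card (it contains two slashes, which may need correcting to a valid Hub repo id), and the generation arguments are illustrative assumptions rather than values from the card.

```python
from transformers import pipeline

# Model id taken verbatim from the card above; Hub repo ids normally contain a
# single "/", so this may need to be adjusted before it resolves.
summarizer = pipeline(
    "summarization",
    model="sambydlo/bart-large-tomasg25/scientific_lay_summarisation",
)

article = (
    "Food production is a major driver of greenhouse gas (GHG) emissions, water and "
    "land use, and dietary risk factors are contributors to non-communicable diseases."
)

# max_length / min_length are illustrative; tune them for your inputs.
print(summarizer(article, max_length=60, min_length=20)[0]["summary_text"])
```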
DARKVIP3R/DialoGPT-medium-Anakin
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 264.72 +/- 25.30 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
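The usage section of the card above is left as a TODO; a minimal sketch of the usual loading pattern with `huggingface_sb3` and `stable_baselines3` follows. The repo id and checkpoint filename are assumptions (they are not stated in the card) and follow the common `ppo-LunarLander-v2.zip` naming convention.

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.evaluation import evaluate_policy

# Hypothetical repo id and filename -- replace with the repo this card belongs to.
checkpoint = load_from_hub(
    repo_id="<username>/ppo-LunarLander-v2",
    filename="ppo-LunarLander-v2.zip",
)

# LunarLander-v2 requires gymnasium[box2d] to be installed.
env = make_vec_env("LunarLander-v2", n_envs=1)
model = PPO.load(checkpoint, env=env)

mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```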
DCU-NLP/bert-base-irish-cased-v1
[ "pytorch", "tf", "bert", "fill-mask", "transformers", "generated_from_keras_callback", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,244
2023-05-19T04:58:31Z
--- license: wtfpl pipeline_tag: text-to-image tags: - Civitai - mirror - stable diffusion --- # A mirror of anything interesting on civitai or elsewhere on the web. This repo is *mostly* organized around the structure that's necessary to import the entire repo into an automatic1111 install. This is because models were originally uploaded directly from such an install via google drive in a colab session. Currently I'm uploading files via my new space over at https://hf.co/anonderpling/repo_uploader; however, I expect to go back to a real file system and git uploads soon, hopefully paperspace can help with that. ## Workflow My workflow for downloading files into paperspace gradient is to download the entire repo *without* pulling LFS files, do a sparse checkout, and then pull the files I want with LFS --include (slow) or aria2 (fast). This workflow should work with colab, too. Whether you use colab or paperspace, you'll probably need the latest version of git to use `sparse-checkout --add`. ## How to upload and download files <details> <summary>sparse checkout with git, bypassing LFS, download with aria2c</summary> ```bash !GIT_LFS_SKIP_SMUDGE=1 git clone git@hf.co:anonderpling/civitai_mirror # this is my command so I can push changes, you'll need to use the https://hf.co/ instead of git@hf.co: !cd civitai_mirror !git sparse-checkout set embeddings # embeddings are small, so it's easy enough to just pull all of them !git sparse-checkout add models/VAE # there's only a few VAEs, and they're generally needed, so grab all those too... !git sparse-checkout add models/Stable-diffusion/illuminati* models/Stable-diffusion/revAnimated* # add some stable diffusion models I intend to work with in this session !apt install aria2 # make sure aria2c is installed # let's break the following command down into parts, since there's multiple commands on one line # find models embeddings -type f -size -2k # find files in models and embeddings directories smaller than 2 kilobytes (these are the lfs pointers that were checked out) # | while read a; do # let's build an aria2c input file # echo "https://huggingface.co/anonderpling/civitai_mirror/resolve/main/${a}"; # tell aria2c where to find the file # echo " out=${a}"; # tell aria2c where to place said file # rm "${a}"; # remove the existing file, because I'm too lazy to look up the option to have aria2c overwrite it (plus if you stop in the middle, you can tell at a glance what else is needed) # done | tee aria2.in.txt # end the loop, but watch to make sure there's nothing accidentally included by wildcards that shouldn't have been...downloads could take a while (and fill the disk) if I accidentally put a space before the * # aria2c -x16 --split=16 -i aria2.in.txt # download all the files as fast as possible !find models embeddings -type f -size -2k | while read a; do echo "https://huggingface.co/anonderpling/civitai_mirror/resolve/main/${a}"; echo " out=${a}"; rm "${a}"; done | tee aria2.in.txt !aria2c -x16 --split=16 -i aria2.in.txt ``` </details> #### Uploading files: - [repo uploader](https://anonderpling-repo-uploader.hf.space), for single files. Requires write permission to the repo. - [Civitai model uploader](https://mirroring-upload-civitai-model.hf.space), for uploading a model from Civitai. Can create a PR. Intended for uploading model previews and civitai's API response json as well as the model, for use by the Civitai unofficial extension.
<details> <summary>to upload more files via git</summary> ```bash # enable the git lfs filters !pip install huggingface_hub !huggingface-cli lfs-enable-largefiles . # yup, not telling. I'm an *anonymous* derpling, after all !git config --local user.name 'not telling' # really couldn't care less if this is accurate...maybe I'll start randomizing it... !git config --local user.email 'anonderpling@users.noreply.huggingface.co' # create an rsa key with no password !ssh-keygen -t rsa -b 4096 -f ~/.ssh/id_hf.co -P '' # a clickable link in jupyter/colab/paperspace print('https://hf.co/settings/keys') # give the public key so it can be easily copied to huggingface !cat ~/.ssh/id_hf.co.pub # track files 1mb+ with lfs manually (huggingface filter deals with models automatically, but large previews will give you errors) !find -type f -size +999k -not \( -name '*.safetensors' -o -name '*.ckpt' -o -name '*.pt' \) -exec git lfs track '{}' + # IMPORTANT: make sure you add the git ssh key above before uploading !sleep 1m # gives you time to do so # upload your files now. do make sure you don't upload files that didn't download properly (interrupted aria2c, lfs pointers, etc) !git add .gitattributes models embeddings !git commit -m "add a message..." !git push ``` </details> <details> <summary>to upload a single file via huggingface_hub:</summary> ```python from huggingface_hub import HfApi api = HfApi() inurl = api.upload_file( # https://huggingface.co/docs/huggingface_hub/v0.14.1/en/package_reference/hf_api#huggingface_hub.HfApi.upload_file token="hf_token", # only needed if you didn't already use huggingface_hub.login() previously. path_or_fileobj="/content/stable-diffusion-webui/models/Stable-diffusion/Rev-Animated.safetensors", # location of the file you want to upload path_in_repo="models/Stable-diffusion/Rev-Animated.safetensors", repo_id="mirroring/civitai_mirror", commit_message="Uploading Rev Animated", # optional. commit_description="I want to upload Rev Animated because it's a special file for me.\nPlease accept my PR. I don't want to host it on my own HF repo!", # optional. Required (here) for PRs. create_pr=True # optional. needed if you're not a special person allowed to add new files to the repo (ie, if you just want us to mirror something; make sure to fill out the description/message above, as well) ) print("Uploaded files. Check them out at "+inurl) ``` </details> <details> <summary>to upload multiple files via huggingface_hub:</summary> ```python from huggingface_hub import HfApi api = HfApi() inurl = api.upload_folder( # https://huggingface.co/docs/huggingface_hub/v0.14.1/en/package_reference/hf_api#huggingface_hub.HfApi.upload_folder token="hf_token", # only needed if you didn't already use huggingface_hub.login() previously. folder_path="/stable-diffusion-webui/models/", path_in_repo="models", repo_id="mirroring/civitai_mirror", allow_patterns="*.safetensors", # optional. Only upload certain files. ignore_patterns=["*.tmp","tmp/*","*.jpg"], # optional. Ignore certain files. commit_message="Uploading my own models", commit_description="I want to upload these models because...", create_pr=True # needed if you're not a special person allowed to add new files to the repo (ie, if you just want us to mirror something) ) print("Uploaded files. Check them out at "+inurl) ``` </details> #### Using paperspace: It's extremely important to remove the ssh key from your HF repo after you're done with it. This ensures that nobody else can access your account.
Paperspace makes free notebooks public, and I'm not sure if that includes filesystem access or outputs; if someone can access that ssh key and you didn't remove the access it generates, you've given them the ability to make changes to your repo! This means they could delete *everything*. If you're technically inclined, you can possibly use the paperspace secrets configuration to hide such information (I'm not sure how it works yet). Alternatively, you can add big files via https://hf.co/anonderpling/repo_uploader before your session (the renamed file part is pretty much added for uploading from HF urls, but also works for adding preview images), and manually upload the civitai.info files locally (these are just simple civitai api responses afaik). ## TODO: 1. finish moving files around (figure out a way to do so without 2 commits per file (one to copy, one to delete the file) without downloading every single file) 2. move sfw models into a subdirectory 3. consider moving locons to their own directory in models, now that I'm using paperspace... - Perpetual: keep an eye on civitai update notifications ## License I like WTFPL. That means anything generated by @anonderpling is licensed WTFPL. That means pretty much only this readme and a couple scripts, since everything else is someone else's work. Other files? You'll have to find their official sources to find their licenses. The licenses for the civitai uploads *might* be in the .civitai.info files, which are standard json as returned by the civitai API.
DCU-NLP/electra-base-irish-cased-generator-v1
[ "pytorch", "electra", "fill-mask", "ga", "transformers", "irish", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "ElectraForMaskedLM" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: Sebastian77/distilbert-base-uncased-finetuned-plantl_gob_es results: [] language: - es --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # Sebastian77/distilbert-base-uncased-finetuned-plantl_gob_es This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.7487 - Train End Logits Accuracy: 0.5839 - Train Start Logits Accuracy: 0.5369 - Validation Loss: 2.0212 - Validation End Logits Accuracy: 0.5396 - Validation Start Logits Accuracy: 0.5058 - Epoch: 2 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 5046, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Train End Logits Accuracy | Train Start Logits Accuracy | Validation Loss | Validation End Logits Accuracy | Validation Start Logits Accuracy | Epoch | |:----------:|:-------------------------:|:---------------------------:|:---------------:|:------------------------------:|:--------------------------------:|:-----:| | 2.6777 | 0.4400 | 0.4209 | 2.1506 | 0.5098 | 0.4806 | 0 | | 2.0078 | 0.5336 | 0.4909 | 2.0486 | 0.5289 | 0.4965 | 1 | | 1.7487 | 0.5839 | 0.5369 | 2.0212 | 0.5396 | 0.5058 | 2 | ### Framework versions - Transformers 4.27.3 - TensorFlow 2.11.0 - Datasets 2.10.1 - Tokenizers 0.13.2
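As an aside (not part of the original card), the optimizer block above describes a standard Keras Adam optimizer driven by a linear polynomial learning-rate decay; a minimal sketch of reconstructing it looks like this:

```python
import tensorflow as tf

# Adam with a linear (power=1.0) PolynomialDecay from 2e-05 to 0.0 over 5046 steps,
# matching the hyperparameters listed in the card above.
lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=2e-05,
    decay_steps=5046,
    end_learning_rate=0.0,
    power=1.0,
)
optimizer = tf.keras.optimizers.Adam(
    learning_rate=lr_schedule,
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-08,
)
```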
DJSammy/bert-base-danish-uncased_BotXO-ai
[ "pytorch", "jax", "da", "dataset:common_crawl", "dataset:wikipedia", "transformers", "bert", "masked-lm", "license:cc-by-4.0", "fill-mask" ]
fill-mask
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- library_name: sample-factory tags: - deep-reinforcement-learning - reinforcement-learning - sample-factory model-index: - name: APPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: doom_health_gathering_supreme type: doom_health_gathering_supreme metrics: - type: mean_reward value: 9.56 +/- 4.47 name: mean_reward verified: false --- A(n) **APPO** model trained on the **doom_health_gathering_supreme** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sample-Factory, download the model with: ``` python -m sample_factory.huggingface.load_from_hub -r ElementBrawlerAI/rl_course_vizdoom_health_gathering_supreme ``` ## Using the model To run the model after download, use the `enjoy` script corresponding to this environment: ``` python -m .usr.local.lib.python3.9.dist-packages.ipykernel_launcher --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme ``` You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details ## Training with this model To continue training with this model, use the `train` script corresponding to this environment: ``` python -m .usr.local.lib.python3.9.dist-packages.ipykernel_launcher --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000 ``` Note, you may have to adjust `--train_for_env_steps` to a suitably high number as the experiment will resume at the number of steps it concluded at.
DJStomp/TestingSalvoNET
[ "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
2023-03-28T23:47:00Z
--- license: wtfpl datasets: - Anthropic/hh-rlhf library_name: diffusers pipeline_tag: text-to-image tags: - not-for-all-eyes --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1). ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Data Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. 
--> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
DKpro000/DialoGPT-medium-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-en` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-en | |:---------------------------|:-------------------|:-------------------------------------------| | parameter_size_full | 278,295,186 | 219,262,901 | | parameter_size_embedding | 192,001,536 | 133,046,016 | | vocab_size | 250,002 | 173,237 | | compression_rate_full | 100.0 | 78.79 | | compression_rate_embedding | 100.0 | 69.29 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|:--------------------|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | | 2 |
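As a quick sanity check (not part of the original card), the embedding parameter counts in the table above are just the vocabulary size multiplied by xlm-roberta-base's hidden size of 768:

```python
# vocab_size * hidden_size reproduces the "parameter_size_embedding" column above.
hidden_size = 768
print(250_002 * hidden_size)  # 192001536 -> original xlm-roberta-base embeddings
print(173_237 * hidden_size)  # 133046016 -> trimmed-en embeddings
```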
DKpro000/DialoGPT-small-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-en-5000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-en-5000 | |:---------------------------|:-------------------|:------------------------------------------------| | parameter_size_full | 278,295,186 | 89,890,186 | | parameter_size_embedding | 192,001,536 | 3,841,536 | | vocab_size | 250,002 | 5,002 | | compression_rate_full | 100.0 | 32.3 | | compression_rate_embedding | 100.0 | 2.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | 5000 | 2 |
DSI/TweetBasedSA
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-en-10000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-en-10000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 93,735,186 | | parameter_size_embedding | 192,001,536 | 7,681,536 | | vocab_size | 250,002 | 10,002 | | compression_rate_full | 100.0 | 33.68 | | compression_rate_embedding | 100.0 | 4.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | 10000 | 2 |
DSI/ar_emotion_6
[ "pytorch", "bert", "transformers" ]
null
{ "architectures": [ "BertForMultiLabelSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-en-15000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-en-15000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 97,580,186 | | parameter_size_embedding | 192,001,536 | 11,521,536 | | vocab_size | 250,002 | 15,002 | | compression_rate_full | 100.0 | 35.06 | | compression_rate_embedding | 100.0 | 6.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | 15000 | 2 |
DSI/personal_sentiment
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-en-30000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-en-30000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 109,115,186 | | parameter_size_embedding | 192,001,536 | 23,041,536 | | vocab_size | 250,002 | 30,002 | | compression_rate_full | 100.0 | 39.21 | | compression_rate_embedding | 100.0 | 12.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | 30000 | 2 |
DTAI-KULeuven/mbert-corona-tweets-belgium-curfew-support
[ "pytorch", "jax", "bert", "text-classification", "multilingual", "nl", "fr", "en", "arxiv:2104.09947", "transformers", "Tweets", "Sentiment analysis" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-en-60000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-en-60000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 132,185,186 | | parameter_size_embedding | 192,001,536 | 46,081,536 | | vocab_size | 250,002 | 60,002 | | compression_rate_full | 100.0 | 47.5 | | compression_rate_embedding | 100.0 | 24.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | 60000 | 2 |
DTAI-KULeuven/mbert-corona-tweets-belgium-topics
[ "pytorch", "jax", "bert", "text-classification", "multilingual", "nl", "fr", "en", "arxiv:2104.09947", "transformers", "Dutch", "French", "English", "Tweets", "Topic classification" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
167
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-fr` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-fr | |:---------------------------|:-------------------|:-------------------------------------------| | parameter_size_full | 278,295,186 | 151,950,024 | | parameter_size_embedding | 192,001,536 | 65,820,672 | | vocab_size | 250,002 | 85,704 | | compression_rate_full | 100.0 | 54.6 | | compression_rate_embedding | 100.0 | 34.28 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|:--------------------|----------------:| | fr | vocabtrimmer/mc4_validation | text | fr | validation | | 2 |
DTAI-KULeuven/robbertje-1-gb-bort
[ "pytorch", "roberta", "fill-mask", "nl", "dataset:oscar", "dataset:oscar (NL)", "dataset:dbrd", "dataset:lassy-ud", "dataset:europarl-mono", "dataset:conll2002", "arxiv:2101.05716", "transformers", "Dutch", "Flemish", "RoBERTa", "RobBERT", "RobBERTje", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: mit --- This is a mirror of pretrained model weights for [TransMVSNet](https://github.com/megvii-research/TransMVSNet) from the authors' Google Drive. See the link for model details.
DTAI-KULeuven/robbertje-1-gb-merged
[ "pytorch", "roberta", "fill-mask", "nl", "dataset:oscar", "dataset:oscar (NL)", "dataset:dbrd", "dataset:lassy-ud", "dataset:europarl-mono", "dataset:conll2002", "arxiv:2101.05716", "transformers", "Dutch", "Flemish", "RoBERTa", "RobBERT", "RobBERTje", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
2023-03-29T00:05:51Z
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-fr-5000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-fr-5000 | |:---------------------------|:-------------------|:------------------------------------------------| | parameter_size_full | 278,295,186 | 89,890,186 | | parameter_size_embedding | 192,001,536 | 3,841,536 | | vocab_size | 250,002 | 5,002 | | compression_rate_full | 100.0 | 32.3 | | compression_rate_embedding | 100.0 | 2.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | fr | vocabtrimmer/mc4_validation | text | fr | validation | 5000 | 2 |
DTAI-KULeuven/robbertje-1-gb-shuffled
[ "pytorch", "roberta", "fill-mask", "nl", "dataset:oscar", "dataset:oscar (NL)", "dataset:dbrd", "dataset:lassy-ud", "dataset:europarl-mono", "dataset:conll2002", "arxiv:2101.05716", "transformers", "Dutch", "Flemish", "RoBERTa", "RobBERT", "RobBERTje", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2023-03-29T00:07:17Z
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-fr-10000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-fr-10000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 93,735,186 | | parameter_size_embedding | 192,001,536 | 7,681,536 | | vocab_size | 250,002 | 10,002 | | compression_rate_full | 100.0 | 33.68 | | compression_rate_embedding | 100.0 | 4.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | fr | vocabtrimmer/mc4_validation | text | fr | validation | 10000 | 2 |
alexandrainst/da-emotion-classification-base
[ "pytorch", "tf", "bert", "text-classification", "da", "transformers", "license:cc-by-sa-4.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
837
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-fr-15000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-fr-15000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 97,580,186 | | parameter_size_embedding | 192,001,536 | 11,521,536 | | vocab_size | 250,002 | 15,002 | | compression_rate_full | 100.0 | 35.06 | | compression_rate_embedding | 100.0 | 6.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | fr | vocabtrimmer/mc4_validation | text | fr | validation | 15000 | 2 |
alexandrainst/da-hatespeech-classification-base
[ "pytorch", "tf", "safetensors", "bert", "text-classification", "da", "transformers", "license:cc-by-sa-4.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
866
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-fr-30000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-fr-30000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 109,115,186 | | parameter_size_embedding | 192,001,536 | 23,041,536 | | vocab_size | 250,002 | 30,002 | | compression_rate_full | 100.0 | 39.21 | | compression_rate_embedding | 100.0 | 12.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | fr | vocabtrimmer/mc4_validation | text | fr | validation | 30000 | 2 |
alexandrainst/da-hatespeech-detection-base
[ "pytorch", "tf", "safetensors", "bert", "text-classification", "da", "transformers", "license:cc-by-sa-4.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,719
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
alexandrainst/da-sentiment-base
[ "pytorch", "tf", "safetensors", "bert", "text-classification", "da", "arxiv:1910.09700", "transformers", "license:cc-by-sa-4.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,432
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-fr-60000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-fr-60000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 132,185,186 | | parameter_size_embedding | 192,001,536 | 46,081,536 | | vocab_size | 250,002 | 60,002 | | compression_rate_full | 100.0 | 47.5 | | compression_rate_embedding | 100.0 | 24.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | fr | vocabtrimmer/mc4_validation | text | fr | validation | 60000 | 2 |
alexandrainst/da-hatespeech-detection-small
[ "pytorch", "electra", "text-classification", "da", "transformers", "license:cc-by-4.0" ]
text-classification
{ "architectures": [ "ElectraForSequenceClassification" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,506
2023-03-29T00:16:38Z
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-pt` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-pt | |:---------------------------|:-------------------|:-------------------------------------------| | parameter_size_full | 278,295,186 | 137,223,674 | | parameter_size_embedding | 192,001,536 | 51,113,472 | | vocab_size | 250,002 | 66,554 | | compression_rate_full | 100.0 | 49.31 | | compression_rate_embedding | 100.0 | 26.62 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|:--------------------|----------------:| | pt | vocabtrimmer/mc4_validation | text | pt | validation | | 2 |
DaWang/demo
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-pt-5000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-pt-5000 | |:---------------------------|:-------------------|:------------------------------------------------| | parameter_size_full | 278,295,186 | 89,890,186 | | parameter_size_embedding | 192,001,536 | 3,841,536 | | vocab_size | 250,002 | 5,002 | | compression_rate_full | 100.0 | 32.3 | | compression_rate_embedding | 100.0 | 2.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | pt | vocabtrimmer/mc4_validation | text | pt | validation | 5000 | 2 |
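The table entries can be reproduced by hand: the embedding matrix holds vocab_size x hidden_size parameters (768 is the XLM-R base hidden dimension, which the card itself does not state), and the compression rates are simple ratios against the original model. A short check of the numbers above:

```python
hidden_size = 768  # xlm-roberta-base hidden dimension (assumed, not in the card)

# Embedding parameters = vocabulary size x hidden size
original_embedding = 250_002 * hidden_size   # 192,001,536
trimmed_embedding = 5_002 * hidden_size      #   3,841,536

# Compression rates from the table, as percentages of the original size
print(round(100 * trimmed_embedding / original_embedding, 2))  # 2.0
print(round(100 * 89_890_186 / 278_295_186, 2))                # 32.3
```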
Dablio/Dablio
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-pt-10000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-pt-10000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 93,735,186 | | parameter_size_embedding | 192,001,536 | 7,681,536 | | vocab_size | 250,002 | 10,002 | | compression_rate_full | 100.0 | 33.68 | | compression_rate_embedding | 100.0 | 4.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | pt | vocabtrimmer/mc4_validation | text | pt | validation | 10000 | 2 |
Daiki/scibert_scivocab_uncased-finetuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-29T00:21:37Z
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-pt-15000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-pt-15000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 97,580,186 | | parameter_size_embedding | 192,001,536 | 11,521,536 | | vocab_size | 250,002 | 15,002 | | compression_rate_full | 100.0 | 35.06 | | compression_rate_embedding | 100.0 | 6.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | pt | vocabtrimmer/mc4_validation | text | pt | validation | 15000 | 2 |
Daivakai/DialoGPT-small-saitama
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-pt-30000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-pt-30000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 109,115,186 | | parameter_size_embedding | 192,001,536 | 23,041,536 | | vocab_size | 250,002 | 30,002 | | compression_rate_full | 100.0 | 39.21 | | compression_rate_embedding | 100.0 | 12.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | pt | vocabtrimmer/mc4_validation | text | pt | validation | 30000 | 2 |
DamolaMack/Classyfied
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-pt-60000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-pt-60000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 132,185,186 | | parameter_size_embedding | 192,001,536 | 46,081,536 | | vocab_size | 250,002 | 60,002 | | compression_rate_full | 100.0 | 47.5 | | compression_rate_embedding | 100.0 | 24.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | pt | vocabtrimmer/mc4_validation | text | pt | validation | 60000 | 2 |
DanBot/TCRsynth
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-it` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-it | |:---------------------------|:-------------------|:-------------------------------------------| | parameter_size_full | 278,295,186 | 138,183,386 | | parameter_size_embedding | 192,001,536 | 52,071,936 | | vocab_size | 250,002 | 67,802 | | compression_rate_full | 100.0 | 49.65 | | compression_rate_embedding | 100.0 | 27.12 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|:--------------------|----------------:| | it | vocabtrimmer/mc4_validation | text | it | validation | | 2 |
DanL/scientific-challenges-and-directions
[ "pytorch", "bert", "text-classification", "en", "dataset:DanL/scientific-challenges-and-directions-dataset", "arxiv:2108.13751", "transformers", "generated_from_trainer" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
134
2023-03-29T00:31:26Z
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-it-5000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-it-5000 | |:---------------------------|:-------------------|:------------------------------------------------| | parameter_size_full | 278,295,186 | 89,890,186 | | parameter_size_embedding | 192,001,536 | 3,841,536 | | vocab_size | 250,002 | 5,002 | | compression_rate_full | 100.0 | 32.3 | | compression_rate_embedding | 100.0 | 2.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | it | vocabtrimmer/mc4_validation | text | it | validation | 5000 | 2 |
Danbi/distilgpt2-finetuned-wikitext2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-it-10000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-it-10000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 93,735,186 | | parameter_size_embedding | 192,001,536 | 7,681,536 | | vocab_size | 250,002 | 10,002 | | compression_rate_full | 100.0 | 33.68 | | compression_rate_embedding | 100.0 | 4.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | it | vocabtrimmer/mc4_validation | text | it | validation | 10000 | 2 |
Danbi/distilroberta-base-finetuned-wikitext2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-it-15000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-it-15000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 97,580,186 | | parameter_size_embedding | 192,001,536 | 11,521,536 | | vocab_size | 250,002 | 15,002 | | compression_rate_full | 100.0 | 35.06 | | compression_rate_embedding | 100.0 | 6.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | it | vocabtrimmer/mc4_validation | text | it | validation | 15000 | 2 |
Dandara/bertimbau-socioambiental
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
2023-03-29T00:35:29Z
--- license: afl-3.0 datasets: - gsdf/EasyNegative language: - ar library_name: keras tags: - biology --- # How to Use This is a new model for image analysis.
Danih1502/t5-small-finetuned-en-to-de
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-it-30000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-it-30000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 109,115,186 | | parameter_size_embedding | 192,001,536 | 23,041,536 | | vocab_size | 250,002 | 30,002 | | compression_rate_full | 100.0 | 39.21 | | compression_rate_embedding | 100.0 | 12.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | it | vocabtrimmer/mc4_validation | text | it | validation | 30000 | 2 |
DannyMichael/ECU911
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-it-60000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-it-60000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 132,185,186 | | parameter_size_embedding | 192,001,536 | 46,081,536 | | vocab_size | 250,002 | 60,002 | | compression_rate_full | 100.0 | 47.5 | | compression_rate_embedding | 100.0 | 24.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | it | vocabtrimmer/mc4_validation | text | it | validation | 60000 | 2 |
Darein/Def
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-es` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-es | |:---------------------------|:-------------------|:-------------------------------------------| | parameter_size_full | 278,295,186 | 153,008,168 | | parameter_size_embedding | 192,001,536 | 66,877,440 | | vocab_size | 250,002 | 87,080 | | compression_rate_full | 100.0 | 54.98 | | compression_rate_embedding | 100.0 | 34.83 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|:--------------------|----------------:| | es | vocabtrimmer/mc4_validation | text | es | validation | | 2 |
DarkKibble/DialoGPT-medium-Tankman
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-es-5000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-es-5000 | |:---------------------------|:-------------------|:------------------------------------------------| | parameter_size_full | 278,295,186 | 89,890,186 | | parameter_size_embedding | 192,001,536 | 3,841,536 | | vocab_size | 250,002 | 5,002 | | compression_rate_full | 100.0 | 32.3 | | compression_rate_embedding | 100.0 | 2.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | es | vocabtrimmer/mc4_validation | text | es | validation | 5000 | 2 |
Darkecho789/email-gen
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-es-10000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-es-10000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 93,735,186 | | parameter_size_embedding | 192,001,536 | 7,681,536 | | vocab_size | 250,002 | 10,002 | | compression_rate_full | 100.0 | 33.68 | | compression_rate_embedding | 100.0 | 4.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | es | vocabtrimmer/mc4_validation | text | es | validation | 10000 | 2 |
DarkestSky/distilbert-base-uncased-finetuned-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-es-15000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-es-15000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 97,580,186 | | parameter_size_embedding | 192,001,536 | 11,521,536 | | vocab_size | 250,002 | 15,002 | | compression_rate_full | 100.0 | 35.06 | | compression_rate_embedding | 100.0 | 6.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | es | vocabtrimmer/mc4_validation | text | es | validation | 15000 | 2 |
Darkrider/covidbert_medmarco
[ "pytorch", "jax", "bert", "text-classification", "arxiv:2010.05987", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
35
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-es-30000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-es-30000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 109,115,186 | | parameter_size_embedding | 192,001,536 | 23,041,536 | | vocab_size | 250,002 | 30,002 | | compression_rate_full | 100.0 | 39.21 | | compression_rate_embedding | 100.0 | 12.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | es | vocabtrimmer/mc4_validation | text | es | validation | 30000 | 2 |
Darkrider/covidbert_mednli
[ "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-es-60000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-es-60000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 132,185,186 | | parameter_size_embedding | 192,001,536 | 46,081,536 | | vocab_size | 250,002 | 60,002 | | compression_rate_full | 100.0 | 47.5 | | compression_rate_embedding | 100.0 | 24.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | es | vocabtrimmer/mc4_validation | text | es | validation | 60000 | 2 |
Darren/darren
[ "pytorch" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-29T00:53:51Z
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1592795404806111233/AY8dX7lQ_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">james*</div> <div style="text-align: center; font-size: 14px;">@hereafterthree</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from james*. | Data | james* | | --- | --- | | Tweets downloaded | 3132 | | Retweets | 251 | | Short tweets | 486 | | Tweets kept | 2395 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/4sq35aa1/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @hereafterthree's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/qm6el12k) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/qm6el12k/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/hereafterthree') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Darya/layoutlmv2-finetuned-funsd-test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-29T00:54:40Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: city_council_gpt3_silver_standard_summaries__t5-large results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # city_council_gpt3_silver_standard_summaries__t5-large This model is a fine-tuned version of [t5-large](https://huggingface.co/t5-large) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.27.3 - Pytorch 1.11.0+cu113 - Datasets 2.8.0 - Tokenizers 0.13.2
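The auto-generated card above has no usage section, so here is a minimal inference sketch. The repository path is a placeholder (the card does not give a hub namespace), and the `summarize:` prefix is an assumption carried over from how T5 summarization fine-tunes are commonly trained; drop it if this checkpoint was trained without a prefix.

```python
from transformers import pipeline

# Placeholder path: replace with the actual hub repository of this checkpoint.
model_id = "your-namespace/city_council_gpt3_silver_standard_summaries__t5-large"

summarizer = pipeline("summarization", model=model_id)

minutes = (
    "The council discussed the proposed budget for road maintenance, "
    "heard public comment on the new zoning ordinance, and voted to "
    "postpone the transit expansion decision until the next session."
)

print(summarizer("summarize: " + minutes, max_length=60, min_length=10))
```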
DataikuNLP/TinyBERT_General_4L_312D
[ "pytorch", "jax", "bert", "arxiv:1909.10351", "transformers" ]
null
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
74
2023-03-29T00:58:42Z
--- license: openrail library_name: diffusers pipeline_tag: text-to-image ---
DataikuNLP/average_word_embeddings_glove.6B.300d
[ "arxiv:1908.10084", "sentence-transformers", "feature-extraction", "sentence-similarity", "license:apache-2.0" ]
sentence-similarity
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - wer model-index: - name: meghtedariatc results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # meghtedariatc This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.1820 - Wer: 53.4928 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 4000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:------:|:----:|:---------------:|:-------:| | 0.0002 | 76.92 | 1000 | 1.1820 | 53.4928 | | 0.0002 | 153.85 | 2000 | 1.1820 | 53.4928 | | 0.0002 | 230.77 | 3000 | 1.1820 | 53.4928 | | 0.0002 | 307.69 | 4000 | 1.1820 | 53.4928 | ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
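No inference example is included in the card, so the sketch below shows the usual way to run a fine-tuned Whisper checkpoint through the ASR pipeline. The repository path and audio file name are placeholders, not taken from the card.

```python
from transformers import pipeline

# Placeholder repository path; the card does not state the hub namespace.
model_id = "your-namespace/meghtedariatc"

# Fine-tuned Whisper checkpoints load through the standard ASR pipeline;
# chunking keeps memory bounded for audio longer than 30 seconds.
asr = pipeline("automatic-speech-recognition", model=model_id, chunk_length_s=30)

print(asr("sample_recording.wav")["text"])
```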
DataikuNLP/distiluse-base-multilingual-cased-v1
[ "pytorch", "distilbert", "arxiv:1908.10084", "sentence-transformers", "feature-extraction", "sentence-similarity", "transformers", "license:apache-2.0" ]
sentence-similarity
{ "architectures": [ "DistilBertModel" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-de` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-de | |:---------------------------|:-------------------|:-------------------------------------------| | parameter_size_full | 278,295,186 | 156,557,872 | | parameter_size_embedding | 192,001,536 | 70,422,528 | | vocab_size | 250,002 | 91,696 | | compression_rate_full | 100.0 | 56.26 | | compression_rate_embedding | 100.0 | 36.68 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|:--------------------|----------------:| | de | vocabtrimmer/mc4_validation | text | de | validation | | 2 |
DataikuNLP/paraphrase-albert-small-v2
[ "pytorch", "albert", "arxiv:1908.10084", "sentence-transformers", "feature-extraction", "sentence-similarity", "transformers", "license:apache-2.0" ]
sentence-similarity
{ "architectures": [ "AlbertModel" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
628
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-de-5000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-de-5000 | |:---------------------------|:-------------------|:------------------------------------------------| | parameter_size_full | 278,295,186 | 89,890,186 | | parameter_size_embedding | 192,001,536 | 3,841,536 | | vocab_size | 250,002 | 5,002 | | compression_rate_full | 100.0 | 32.3 | | compression_rate_embedding | 100.0 | 2.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | de | vocabtrimmer/mc4_validation | text | de | validation | 5000 | 2 |
DataikuNLP/paraphrase-multilingual-MiniLM-L12-v2
[ "pytorch", "bert", "arxiv:1908.10084", "sentence-transformers", "feature-extraction", "sentence-similarity", "transformers", "license:apache-2.0" ]
sentence-similarity
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,517
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - afrispeech-200 metrics: - wer model-index: - name: whisper-small-hi-2400_500_100 results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: afrispeech-200 type: afrispeech-200 config: all split: train args: all metrics: - name: Wer type: wer value: 0.6376484560570072 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-small-hi-2400_500_100 This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the afrispeech-200 dataset. It achieves the following results on the evaluation set: - Loss: 1.1136 - Wer: 0.6376 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-06 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 200 - training_steps: 900 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 2.0658 | 0.17 | 150 | 2.3108 | 1.0536 | | 1.4738 | 0.33 | 300 | 1.3138 | 1.0395 | | 0.8823 | 1.17 | 450 | 1.2148 | 0.7992 | | 1.1971 | 1.33 | 600 | 1.1466 | 0.7340 | | 0.7529 | 2.17 | 750 | 1.1256 | 0.6723 | | 1.1194 | 2.33 | 900 | 1.1136 | 0.6376 | ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
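To make the reported metric concrete: word error rate is the number of word-level substitutions, deletions, and insertions divided by the number of words in the reference transcript. The toy transcripts below are invented for illustration (they are not from afrispeech-200); the `evaluate` library, with `jiwer` installed, reproduces the calculation.

```python
import evaluate  # pip install evaluate jiwer

wer_metric = evaluate.load("wer")

references = ["the patient was prescribed amoxicillin twice daily"]
predictions = ["the patient was prescribe amoxicillin twice a day"]

# 2 substitutions ("prescribe", "a") + 1 insertion ("day") = 3 errors
# over 7 reference words -> WER of roughly 0.43
print(wer_metric.compute(predictions=predictions, references=references))
```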
Dave/twomad-model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-29T01:12:41Z
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-de-10000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-de-10000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 93,735,186 | | parameter_size_embedding | 192,001,536 | 7,681,536 | | vocab_size | 250,002 | 10,002 | | compression_rate_full | 100.0 | 33.68 | | compression_rate_embedding | 100.0 | 4.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | de | vocabtrimmer/mc4_validation | text | de | validation | 10000 | 2 |
DavidAMcIntosh/DialoGPT-small-rick
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-de-15000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-de-15000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 97,580,186 | | parameter_size_embedding | 192,001,536 | 11,521,536 | | vocab_size | 250,002 | 15,002 | | compression_rate_full | 100.0 | 35.06 | | compression_rate_embedding | 100.0 | 6.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | de | vocabtrimmer/mc4_validation | text | de | validation | 15000 | 2 |
DavidSpaceG/MSGIFSR
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-29T01:17:00Z
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-de-30000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming vocabulary of language models to compress the model size. Following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-de-30000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 109,115,186 | | parameter_size_embedding | 192,001,536 | 23,041,536 | | vocab_size | 250,002 | 30,002 | | compression_rate_full | 100.0 | 39.21 | | compression_rate_embedding | 100.0 | 12.0 | Following table shows the parameter used to trim vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | de | vocabtrimmer/mc4_validation | text | de | validation | 30000 | 2 |
Davlan/bert-base-multilingual-cased-finetuned-igbo
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
2023-03-29T01:18:13Z
--- license: wtfpl language: - en pipeline_tag: text-generation tags: - llama library_name: adapter-transformers --- <p align="center"><img src="https://media.discordapp.net/attachments/1089718753186549960/1090451672121233518/00278-279547758.png" height=256></p> <h1 align="center"> Alpaca 7B Native Enhanced </h1> <p align="center">The Most Advanced Alpaca 7B Model</p> ## πŸ“ƒ Model Facts - Trained natively on 8x Nvidia A100 40GB GPUs; no LoRA used - Trained on the largest & most accurate dataset yet - Enhanced Programming Capabilities - The first Alpaca model to have conversational awareness ## πŸš€ Quick Start Guide Step 1. Make sure git-lfs is installed and ready to use ([Guide](https://git-lfs.com/)) Step 2. Download and install [text-generation-webui](https://github.com/oobabooga/text-generation-webui) according to the repository's instructions Step 3. Navigate over to one of its model folders and clone this repository: git clone https://huggingface.co/8bit-coder/alpaca-7b-nativeEnhanced Step 4. Launch the webui, replace "Your name" with "User" and replace the default instruction prompt with: > You are an AI language model designed to assist the User by answering their questions, offering advice, and engaging in casual conversation in a friendly, helpful, and informative manner. You respond clearly, coherently, and you consider the conversation history. > > User: Hey, how's it going? > > Assistant: Hey there! I'm doing great, thank you. What can I help you with today? Let's have a fun chat! Step 5. Change the settings to match this screenshot: ![Settings](https://media.discordapp.net/attachments/1089718753186549960/1090428983595782194/image.png) ## πŸ“š Training #### We used 8x Nvidia A100 40GB GPUs for training this model. Training took ~3 hours and the resulting loss was 0.4761 over 3 epochs. The command used for training is as follows: > **torchrun --nproc_per_node=8 --master_port=3045 ./stanford_alpaca/train.py --model_name_or_path ./llama-7b-hf --data_path ./alpaca-7b-nativeEnhanced/training_files/alpaca-megaset-fixed.json --fp16 True --output_dir ./output_7b --num_train_epochs 3 --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --gradient_accumulation_steps 16 --evaluation_strategy "no" --save_strategy "steps" --save_steps 200 --learning_rate 2e-5 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type "cosine" --logging_steps 1 --fsdp "full_shard auto_wrap" --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' --tf32 True** There's a folder in this repository called training_files. **full-training-instructions.txt** is the full list of commands from start to finish of training, to converting the model all the way to 4 bit quantized ggml. **It is not recommended to quantize this model down to 4 bits. The instructions are included purely for informational purposes.** In addition, the training instructions file is built specifically for rented cloud computing. This means that by following the commands in the file, anyone should be able to train a similar model. ### Common errors while training: - CUDA Out of Memory error - This is because your GPUs do not have a minimum of 40GB of vram. The weakest GPU that we've been able to successfully train on has been Nvidia A100 40GB. Even with 8 of these, the vram usage was almost always right up at the limit. If you have 40GB GPUs and are still running into this error, try halving the **per_device_train_batch_size** and **per_device_eval_batch_size** and doubling the **gradient_accumulation_steps**.
If you have more than 40GB of vram per GPU and wish to train faster, the opposite applies. - LLaMATokenizer error - This happens because you forgot to fix tokenizer_config.json in the llama-7b-hf directory. The fix is to rename **LLaMATokenizer** to **LlamaTokenizer** in that file. - RuntimeError: CUDA error: invalid device ordinal - This error occurs when your **nproc_per_node** is set to a number greater than how many GPUs you have installed in your system. You can check how many GPUs you have installed by running **nvidia-smi**. - torchrun is not recognized - This error occurs when you have a python version older than 3.10. Follow the instructions in the training instructions file to install miniconda and get python 3.10 set up. Circumventing this error by running python -m torch.distributed.run will **not work**. Many of the dependencies require python 3.10 and will fatally error out at the start of training. - KeyError - This happens when your JSON training data is broken in some way. Try running the dataset_validator.py in the training_files folder to find the broken key (a minimal sketch of such a check appears at the end of this card). ## πŸ“ Notes - The main version of this model is in the Hugging Face transformers format. The other format (.pth) is provided **purely for experimental use with llama.cpp** and is not guaranteed to have conversational awareness. - This model exhibits weird behavior when quantized to 4 bits. This might be due to the complexity of the model. We recommend the smallest quantization to be 8 bits, but this is untested. - This model is slightly **underfitted**. We observed that training the model with a smaller gradient accumulation size benefitted the response quality. - This model appears to have full conversational awareness. This means that provided you're running the model in the same configuration we detailed in the Quick Start Guide, you should be able to hold a very detailed conversation with the AI without issues. There is a limit to its memory, and it is 2048 tokens. Beyond that, it'll forget details and will need to be reminded. ## πŸ”§ Dataset The dataset used for training this model is made from [AlpacaDataCleaned](https://github.com/gururise/AlpacaDataCleaned) and [codealpaca](https://github.com/sahil280114/codealpaca). We combined these datasets for the following reasons: 1. Increased accuracy, since the original stanford_alpaca dataset had many errors. 2. Better knowledge of programming. 3. More training data. We had an issue with the latest AlpacaDataCleaned dataset where, at around 90k lines in, one of the keys has a typo. The key is "instruction:" instead of "instruction". We have fixed this error in the provided megaset, but if you plan on grabbing directly from AlpacaDataCleaned, make sure to fix this error. Otherwise, the training script will fail due to a KeyError. ## πŸ‘¨β€πŸ’» Credits Credits go to [Meta](https://github.com/facebookresearch/llama) for creating the foundational LLaMA models and [Stanford](https://github.com/tatsu-lab/stanford_alpaca) for the instructions on how to train. For the dataset, credits go to [AlpacaDataCleaned](https://github.com/gururise/AlpacaDataCleaned) and [codealpaca](https://github.com/sahil280114/codealpaca). Credits also go to [chavinlo](https://huggingface.co/chavinlo/alpaca-native) for creating the original Alpaca 7B Native model, the inspiration behind this model. Lastly, credits go to the homies that stayed up all night again and again: 8bit, Ο€, chug, Taddy, yoyodapro, Symax, and most importantly: stablediffusion for the beautiful artwork
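As referenced in the KeyError note above, the repository ships a `dataset_validator.py`; the actual script lives in the `training_files` folder, but a minimal sketch of that kind of check, assuming the standard Alpaca `instruction`/`input`/`output` fields, could look like this:

```python
import json

EXPECTED_KEYS = {"instruction", "input", "output"}  # standard Alpaca fields (assumption)

with open("alpaca-megaset-fixed.json", encoding="utf-8") as f:
    records = json.load(f)

# Flag every record whose keys deviate from the expected set,
# e.g. the "instruction:" typo described in the Dataset section.
for i, record in enumerate(records):
    keys = set(record)
    if keys != EXPECTED_KEYS:
        print(f"record {i}: extra={sorted(keys - EXPECTED_KEYS)}, missing={sorted(EXPECTED_KEYS - keys)}")
```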
Davlan/bert-base-multilingual-cased-finetuned-luganda
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-de-60000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming the vocabulary of language models to compress the model size. The following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-de-60000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 132,185,186 | | parameter_size_embedding | 192,001,536 | 46,081,536 | | vocab_size | 250,002 | 60,002 | | compression_rate_full | 100.0 | 47.5 | | compression_rate_embedding | 100.0 | 24.0 | The following table shows the parameters used to trim the vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | de | vocabtrimmer/mc4_validation | text | de | validation | 60000 | 2 |
Davlan/bert-base-multilingual-cased-finetuned-swahili
[ "pytorch", "tf", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
67
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-ar` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming the vocabulary of language models to compress the model size. The following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-ar | |:---------------------------|:-------------------|:-------------------------------------------| | parameter_size_full | 278,295,186 | 124,394,447 | | parameter_size_embedding | 192,001,536 | 38,300,928 | | vocab_size | 250,002 | 49,871 | | compression_rate_full | 100.0 | 44.7 | | compression_rate_embedding | 100.0 | 19.95 | The following table shows the parameters used to trim the vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|:--------------------|----------------:| | ar | vocabtrimmer/mc4_validation | text | ar | validation | | 2 |
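The counts in the first table can be sanity-checked after download; a small sketch using the standard `transformers` auto classes (exact totals may differ slightly depending on how tied weights are counted):

```python
from transformers import AutoModelForMaskedLM, AutoTokenizer

name = "vocabtrimmer/xlm-roberta-base-trimmed-ar"
model = AutoModelForMaskedLM.from_pretrained(name)
tokenizer = AutoTokenizer.from_pretrained(name)

# Should land close to the table above: ~124M parameters and a ~49.9k-entry vocabulary.
print(f"parameters: {sum(p.numel() for p in model.parameters()):,}")
print(f"vocab size: {len(tokenizer):,}")
```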
Davlan/bert-base-multilingual-cased-finetuned-wolof
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-03-29T01:27:21Z
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-ar-5000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming the vocabulary of language models to compress the model size. The following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-ar-5000 | |:---------------------------|:-------------------|:------------------------------------------------| | parameter_size_full | 278,295,186 | 89,890,186 | | parameter_size_embedding | 192,001,536 | 3,841,536 | | vocab_size | 250,002 | 5,002 | | compression_rate_full | 100.0 | 32.3 | | compression_rate_embedding | 100.0 | 2.0 | The following table shows the parameters used to trim the vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | ar | vocabtrimmer/mc4_validation | text | ar | validation | 5000 | 2 |
Davlan/bert-base-multilingual-cased-finetuned-yoruba
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
21
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-ar-10000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming the vocabulary of language models to compress the model size. The following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-ar-10000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 93,735,186 | | parameter_size_embedding | 192,001,536 | 7,681,536 | | vocab_size | 250,002 | 10,002 | | compression_rate_full | 100.0 | 33.68 | | compression_rate_embedding | 100.0 | 4.0 | The following table shows the parameters used to trim the vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | ar | vocabtrimmer/mc4_validation | text | ar | validation | 10000 | 2 |
Davlan/bert-base-multilingual-cased-masakhaner
[ "pytorch", "tf", "bert", "token-classification", "arxiv:2103.11811", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
88
null
--- license: creativeml-openrail-m base_model: runwayml/stable-diffusion-v1-5 tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - lora inference: true --- # LoRA text2image fine-tuning - https://huggingface.co/vsevolodl/pokemon-lora These are LoRA adaptation weights for runwayml/stable-diffusion-v1-5. The weights were fine-tuned on the lambdalabs/pokemon-blip-captions dataset. You can find some example images below. ![img_0](./image_0.png) ![img_1](./image_1.png) ![img_2](./image_2.png) ![img_3](./image_3.png)
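A minimal inference sketch, assuming a recent `diffusers` release; the base model and the LoRA repo id are taken from the card above:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Load the LoRA attention weights on top of the base UNet.
pipe.unet.load_attn_procs("vsevolodl/pokemon-lora")

image = pipe("a green pokemon with leaf wings", num_inference_steps=30).images[0]
image.save("pokemon.png")
```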
Davlan/bert-base-multilingual-cased-ner-hrl
[ "pytorch", "tf", "bert", "token-classification", "transformers", "autotrain_compatible", "has_space" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
269,898
null
--- license: mit tags: - generated_from_trainer model-index: - name: codeparrot-ds results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # codeparrot-ds This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 512 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 1000 - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 1.11.0+cu113 - Datasets 2.10.1 - Tokenizers 0.13.2
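The card above gives no usage snippet, so here is a minimal generation sketch; the hub id `codeparrot-ds` is a placeholder for wherever this fine-tuned checkpoint is actually hosted:

```python
from transformers import pipeline

# Placeholder hub id; substitute the actual repository name of this checkpoint.
generator = pipeline("text-generation", model="codeparrot-ds")

prompt = "# create a pandas dataframe from a dict\nimport pandas as pd\n"
print(generator(prompt, max_new_tokens=40, do_sample=True, temperature=0.7)[0]["generated_text"])
```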
Davlan/byt5-base-eng-yor-mt
[ "pytorch", "t5", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-ar-15000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming the vocabulary of language models to compress the model size. The following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-ar-15000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 97,580,186 | | parameter_size_embedding | 192,001,536 | 11,521,536 | | vocab_size | 250,002 | 15,002 | | compression_rate_full | 100.0 | 35.06 | | compression_rate_embedding | 100.0 | 6.0 | The following table shows the parameters used to trim the vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | ar | vocabtrimmer/mc4_validation | text | ar | validation | 15000 | 2 |
Davlan/byt5-base-yor-eng-mt
[ "pytorch", "t5", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
# Vocabulary Trimmed [xlm-roberta-base](https://huggingface.co/xlm-roberta-base): `vocabtrimmer/xlm-roberta-base-trimmed-ar-30000` This model is a trimmed version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming the vocabulary of language models to compress the model size. The following table shows a summary of the trimming process. | | xlm-roberta-base | vocabtrimmer/xlm-roberta-base-trimmed-ar-30000 | |:---------------------------|:-------------------|:-------------------------------------------------| | parameter_size_full | 278,295,186 | 109,115,186 | | parameter_size_embedding | 192,001,536 | 23,041,536 | | vocab_size | 250,002 | 30,002 | | compression_rate_full | 100.0 | 39.21 | | compression_rate_embedding | 100.0 | 12.0 | The following table shows the parameters used to trim the vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | ar | vocabtrimmer/mc4_validation | text | ar | validation | 30000 | 2 |
Davlan/distilbert-base-multilingual-cased-ner-hrl
[ "pytorch", "tf", "distilbert", "token-classification", "transformers", "autotrain_compatible", "has_space" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
123,856
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1632674116112089088/wjGdtuux_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">πŸ€– AI BOT πŸ€–</div> <div style="text-align: center; font-size: 16px; font-weight: 800">quiet luke</div> <div style="text-align: center; font-size: 14px;">@quietluke</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from quiet luke. | Data | quiet luke | | --- | --- | | Tweets downloaded | 3244 | | Retweets | 211 | | Short tweets | 1074 | | Tweets kept | 1959 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/44qcjzz1/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @quietluke's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/x3gisyhj) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/x3gisyhj/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/quietluke') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Davlan/mbart50-large-eng-yor-mt
[ "pytorch", "mbart", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
# TODO: full model card. A mix of (GPT-J-6B-Shinen + GPT-J-Dolly LoRA) + Pygmalion-6b, at a ratio of: GPT-J-6B-Shinen - 20%, GPT-J-Dolly LoRA - 20%, Pygmalion-6b - 60%. A rough sketch of this kind of ratio blend is shown below.
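The exact merging procedure is not documented here; as a generic illustration only, blending full checkpoints at a 20/20/60 ratio amounts to a weighted average of their state dicts. All paths below are hypothetical, and the real recipe may differ (for example, in how the Dolly LoRA was folded into GPT-J first):

```python
import torch

# Hypothetical local paths; the Dolly LoRA is assumed to have been merged
# into a full GPT-J checkpoint before blending.
parts = {
    "gpt-j-6b-shinen.bin": 0.2,
    "gpt-j-6b-dolly-merged.bin": 0.2,
    "pygmalion-6b.bin": 0.6,
}

merged = {}
for path, weight in parts.items():
    state = torch.load(path, map_location="cpu")
    for key, tensor in state.items():
        merged[key] = merged.get(key, 0) + tensor.float() * weight

torch.save(merged, "mixed-6b.bin")
```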
Davlan/mbart50-large-yor-eng-mt
[ "pytorch", "mbart", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: creativeml-openrail-m tags: - stable-diffusion - text-to-image --- --- # Open Source + Copy Paste = Forked --- # Misc13 merged models by Mods13 civitai.com/user/Mods13/models --- # Be careful! These models are not intended for commercial use; if you use them commercially, you might be infringing copyright and breaking the law. Please use them responsibly. --- civitai.com/user/Powidl43
Davlan/mt5_base_eng_yor_mt
[ "pytorch", "mt5", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: apache-2.0 language: - zh --- # Chinese-LLaMA-LoRA-7B This repo contains the tokenizer, Chinese-LLaMA LoRA weights and configs for [Chinese-LLaMA-Alpaca](https://github.com/ymcui/Chinese-LLaMA-Alpaca) Instructions for using the weights can be found at https://github.com/ymcui/Chinese-LLaMA-Alpaca.
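The linked repository's own conversion and merge scripts are the authoritative way to apply these weights; purely as a rough sketch of the idea, assuming `peft` is installed, a locally converted LLaMA-7B in Hugging Face format, and a local clone of this repo (all paths below are placeholders):

```python
from peft import PeftModel
from transformers import LlamaForCausalLM, LlamaTokenizer

# Placeholder paths: the converted base model lives locally, and the
# tokenizer + LoRA weights come from a local clone of this repository.
tokenizer = LlamaTokenizer.from_pretrained("path/to/chinese-llama-lora-7b")
base = LlamaForCausalLM.from_pretrained("path/to/llama-7b-hf")
base.resize_token_embeddings(len(tokenizer))  # the Chinese tokenizer enlarges the vocabulary
model = PeftModel.from_pretrained(base, "path/to/chinese-llama-lora-7b")
```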
Davlan/naija-twitter-sentiment-afriberta-large
[ "pytorch", "tf", "xlm-roberta", "text-classification", "arxiv:2201.08277", "transformers", "has_space" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
61
null
--- license: mit tags: - generated_from_trainer datasets: - Fraser/short-jokes metrics: - accuracy model-index: - name: gpt2-jokes results: - task: name: Causal Language Modeling type: text-generation dataset: name: Fraser/short-jokes type: Fraser/short-jokes config: default split: train[:5%] args: default metrics: - name: Accuracy type: accuracy value: 0.8795507387461411 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gpt2-jokes This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the Fraser/short-jokes dataset. It achieves the following results on the evaluation set: - Loss: 0.6748 - Accuracy: 0.8796 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - distributed_type: multi-GPU - num_devices: 4 - total_train_batch_size: 128 - total_eval_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 0.06 | 100 | 0.7285 | 0.8732 | | No log | 0.12 | 200 | 0.7141 | 0.8747 | | No log | 0.17 | 300 | 0.7056 | 0.8757 | | No log | 0.23 | 400 | 0.6992 | 0.8764 | | 0.7907 | 0.29 | 500 | 0.6942 | 0.8771 | | 0.7907 | 0.35 | 600 | 0.6906 | 0.8777 | | 0.7907 | 0.41 | 700 | 0.6873 | 0.8779 | | 0.7907 | 0.47 | 800 | 0.6848 | 0.8782 | | 0.7907 | 0.52 | 900 | 0.6830 | 0.8786 | | 0.7105 | 0.58 | 1000 | 0.6809 | 0.8788 | | 0.7105 | 0.64 | 1100 | 0.6794 | 0.8790 | | 0.7105 | 0.7 | 1200 | 0.6780 | 0.8792 | | 0.7105 | 0.76 | 1300 | 0.6770 | 0.8793 | | 0.7105 | 0.81 | 1400 | 0.6760 | 0.8794 | | 0.7034 | 0.87 | 1500 | 0.6755 | 0.8794 | | 0.7034 | 0.93 | 1600 | 0.6750 | 0.8795 | | 0.7034 | 0.99 | 1700 | 0.6748 | 0.8795 | ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 2.0.0-rc1 - Datasets 2.10.1 - Tokenizers 0.13.2
Davlan/xlm-roberta-base-finetuned-amharic
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
401
null
--- license: apache-2.0 language: - zh --- # Chinese-Alpaca-LoRA-7B This repo contains the tokenizer, Chinese-Alpaca LoRA weights and configs for [Chinese-LLaMA-Alpaca](https://github.com/ymcui/Chinese-LLaMA-Alpaca) Instructions for using the weights can be found at https://github.com/ymcui/Chinese-LLaMA-Alpaca.
Davlan/xlm-roberta-base-finetuned-hausa
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
234
2023-03-29T02:12:17Z
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### hffmodel Dreambooth model trained by ukeeba with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
Davlan/xlm-roberta-base-finetuned-igbo
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
68
null
--- license: apache-2.0 tags: - instruct - instructions - domain adapt - instructiongen metrics: - rouge widget: - text: >- You'll need to start by choosing the right venue. Consider the type of atmosphere and the size of the area that will be suitable for the number of guests you plan to invite. Choose the right decorations based on your brother's interests, such as balloons in his favorite colors, banners, and streamers. Next, decide on the food and drinks, making sure they are tasty and appropriate for the occasion. Then decide on the other games, music, and entertainment that will make the party memorable. Finally, involve your brother's friends and family to help create the perfect surprise. example_title: birthday party - text: 1) cookies and cream 2) chocolate chip 3) mint chip 4) oreo example_title: ice cream - text: >- Start by selecting a scale model of a building that fits the theme. Use a hobby knife and glue to cut and assemble the model into a ruined or abandoned version of itself, adding details like broken windows and graffiti. Create a base for the diorama using foam, plaster, or other materials, and paint it to resemble a ruined street or sidewalk. Add miniature vehicles, debris, and figures to complete the scene, and use weathering techniques like dry brushing and rust washes to add realism. Display the diorama in a shadow box or other protective case to showcase your work. example_title: Miniature diorama creation - text: >- Start by selecting clothing that is futuristic and edgy, such as leather jackets, neon-colored accessories, and tech-inspired patterns. Add accessories like goggles, cybernetic implants, and LED lights to enhance the cyberpunk vibe. Use makeup and body paint to create a futuristic look, such as metallic skin or neon makeup. Consider adding functional elements to your costume, such as a built-in backpack or hidden pockets for your tech gadgets. Finally, practice your confident walk and embrace your inner cyberpunk for a memorable and immersive costume experience. example_title: Cyberpunk costume design - text: >- Start by creating a base terrain with mountains, valleys, and other natural features. Use fractal noise and displacement mapping to add texture and detail to the terrain, and experiment with different materials like rock, grass, and water. Add surreal elements like floating islands, giant mushrooms, or impossible geometry to create a dreamlike atmosphere. Use lighting and color grading to enhance the mood and tone of the scene, and render the final image at a high resolution for maximum impact. Share your surreal landscape with the world and inspire others to explore the possibilities of 3D art. example_title: Surreal 3D landscape creation - text: >- Start by setting a realistic goal and creating a training plan. Build up your mileage gradually over time, and incorporate cross-training and strength exercises to prevent injury and improve endurance. Be sure to stay hydrated and properly fuel your body with nutritious foods. Listen to your body and adjust your training as needed to avoid overexertion or burnout. Finally, taper your training in the weeks leading up to the race to give your body time to rest and recover before the big day. example_title: Marathon training - text: >- What the hell did you just say about me, you little bug? I graduated top of my class in https://huggingface.co/spaces/safetensors/convert, and I've been involved in numerous secret tasks on PyTorch, and I have over 300 confirmed PRs. 
I am trained in code optimization and I'm the top converter in the entire Hugging Face forces. You are nothing to me but just another target. I will convert your code with precision the likes of which has never been seen before on this Earth, mark my freaking words. You think you can get away with saying your code is safe over the Internet? Think again, bug. As we speak I am contacting my secret network of data scientists across the GitHub and your IP is being traced right now so you better prepare for the storm, maggot. The storm that wipes out the pathetic little thing you call your code. You’re freaking doomed, kid. I can be anywhere, anytime, and I can convert your code in over seven hundred ways, and that’s just with my bare hands. Not only am I extensively trained in unarmed conversion, but I have access to the entire arsenal of the Hugging Face and I will use it to its full extent to wipe your miserable code off the face of the continent, you little bug. If only you could have known what unholy retribution your little "clever" comment was about to bring down upon you, maybe you would have held your freaking tongue. But you couldn’t, you didn’t, and now you’re paying the price, you goddamn idiot. I will convert fury all over you and you will drown in it. Your model's doomed, kiddo. Oh, and by the way, these converted files load much faster than your PyTorch counterparts. You can check the speed here: https://colab.research.google.com/github/huggingface/notebooks/blob/main/safetensors_doc/en/speed.ipynb Your widgets will run using this converted model, even if you do not merge. But, if you find any issues, feel free to report here: https://huggingface.co/spaces/safetensors/convert/discussions Feel free to ignore this PR. But remember, I'm watching you. example_title: Navy Safetensors PR inference: parameters: max_length: 96 num_beams: 4 early_stopping: true datasets: - pszemraj/fleece2instructions-inputs-alpaca-cleaned language: - en pipeline_tag: text2text-generation library_name: transformers --- # bart-large-instructiongen-w-inputs Use this text2text model to find out what LLM `instruction` (**and** `inputs` if relevant) might have generated `<arbitrary input text>`! This model is a fine-tuned version of [facebook/bart-large](https://huggingface.co/facebook/bart-large) on the `pszemraj/fleece2instructions-inputs-alpaca-cleaned` dataset. It achieves the following results on the evaluation set: - Loss: 0.9302 - Rouge1: 64.2236 - Rouge2: 41.5632 - Rougel: 60.5935 - Rougelsum: 62.1285 - Gen Len: 25.8938 ## example ![api](https://i.imgur.com/2xubG7N.png) ## Intended uses & limitations This model is intended to be used to generate instructions from arbitrary text. You can then use these instructions + your data to fine-tune an LLM on instructions w.r.t. a specific domain. This model is primarily intended to enable **low-resource domain adaptation**, rather than "_I want to generate even better prompts for the FLAN-V2 dataset!_". The `fleece2instructions-inputs-alpaca-cleaned` dataset, obtained from the [alpaca-lora repo](https://github.com/tloen/alpaca-lora) under the ODC-BY license, has been converted to a text2text format for use with language models. In this dataset, the original 'inputs' and 'instructions' columns are combined into a single 'instructions_inputs' column. To clearly separate the two types of content, each piece of text is prefixed with either an `<instruction>` or `<inputs>` token. 
These tokens not only facilitate model comprehension, but also allow for easy regex separation of model outputs during inference. As such, users can expect the output of this model to be similarly structured with `<instruction>` and `<inputs>` tokens. ## Training and evaluation data Refer to the [fleece2instructions-inputs-alpaca-cleaned](https://huggingface.co/datasets/pszemraj/fleece2instructions-inputs-alpaca-cleaned) dataset ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 6e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.03 - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 1.0145 | 1.0 | 1361 | 1.0460 | 62.8374 | 39.8538 | 59.2593 | 60.8095 | 25.2752 | | 0.8796 | 2.0 | 2722 | 0.9289 | 63.7086 | 41.1315 | 60.1588 | 61.7145 | 25.7215 | | 0.6943 | 3.0 | 4083 | 0.9302 | 64.2236 | 41.5632 | 60.5935 | 62.1285 | 25.8938 |
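Since the outputs are delimited with `<instruction>` and `<inputs>` tokens as described above, they are straightforward to split after generation. A minimal sketch, where the hub id is assumed from the card title and the exact whitespace around the markers is an assumption:

```python
import re
from transformers import pipeline

# Hub id assumed from the card title; adjust if the checkpoint lives elsewhere.
gen = pipeline("text2text-generation", model="pszemraj/bart-large-instructiongen-w-inputs")

text = "1) cookies and cream 2) chocolate chip 3) mint chip 4) oreo"
out = gen(text, max_length=96, num_beams=4, early_stopping=True)[0]["generated_text"]

# Split the generated string on the two markers described above.
match = re.search(r"<instruction>\s*(.*?)\s*(?:<inputs>\s*(.*))?\s*$", out, flags=re.S)
instruction = match.group(1) if match else out
inputs = match.group(2) if match else None
print(instruction, inputs, sep="\n")
```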
Davlan/xlm-roberta-base-finetuned-kinyarwanda
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
61
null
--- title: Text2Video-Zero emoji: πŸš€ colorFrom: green colorTo: blue sdk: gradio sdk_version: 3.23.0 app_file: app.py pinned: false pipeline_tag: text-to-video --- Paper: https://arxiv.org/abs/2303.13439
Davlan/xlm-roberta-base-finetuned-luganda
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: bert-finetuned-ner results: - task: name: Token Classification type: token-classification dataset: name: conll2003 type: conll2003 config: conll2003 split: validation args: conll2003 metrics: - name: Precision type: precision value: 0.9432576769025367 - name: Recall type: recall value: 0.9511948838774823 - name: F1 type: f1 value: 0.947209653092006 - name: Accuracy type: accuracy value: 0.9913165375180094 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-ner This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the conll2003 dataset. It achieves the following results on the evaluation set: - Loss: 0.0137 - Precision: 0.9433 - Recall: 0.9512 - F1: 0.9472 - Accuracy: 0.9913 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.0179 | 1.0 | 1756 | 0.0141 | 0.9337 | 0.9416 | 0.9377 | 0.9901 | | 0.0071 | 2.0 | 3512 | 0.0135 | 0.9442 | 0.9512 | 0.9477 | 0.9915 | | 0.0036 | 3.0 | 5268 | 0.0137 | 0.9433 | 0.9512 | 0.9472 | 0.9913 | ### Framework versions - Transformers 4.26.1 - Pytorch 2.0.0 - Datasets 2.10.1 - Tokenizers 0.13.2
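A short usage sketch for the fine-tuned checkpoint; the hub id below is a placeholder, since the card does not state where the model is hosted:

```python
from transformers import pipeline

# Placeholder repository name; point this at the actual fine-tuned checkpoint.
ner = pipeline(
    "token-classification",
    model="your-username/bert-finetuned-ner",
    aggregation_strategy="simple",  # groups sub-word tokens into whole entities
)
print(ner("Hugging Face is based in New York City."))
```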
Davlan/xlm-roberta-base-finetuned-luo
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: apache-2.0 tags: - instruct - instructions - domain adapt - instructiongen metrics: - rouge widget: - text: >- You'll need to start by choosing the right venue. Consider the type of atmosphere and the size of the area that will be suitable for the number of guests you plan to invite. Choose the right decorations based on your brother's interests, such as balloons in his favorite colors, banners, and streamers. Next, decide on the food and drinks, making sure they are tasty and appropriate for the occasion. Then decide on the other games, music, and entertainment that will make the party memorable. Finally, involve your brother's friends and family to help create the perfect surprise. example_title: birthday party - text: 1) cookies and cream 2) chocolate chip 3) mint chip 4) oreo example_title: ice cream - text: >- Start by selecting a scale model of a building that fits the theme. Use a hobby knife and glue to cut and assemble the model into a ruined or abandoned version of itself, adding details like broken windows and graffiti. Create a base for the diorama using foam, plaster, or other materials, and paint it to resemble a ruined street or sidewalk. Add miniature vehicles, debris, and figures to complete the scene, and use weathering techniques like dry brushing and rust washes to add realism. Display the diorama in a shadow box or other protective case to showcase your work. example_title: Miniature diorama creation - text: >- Start by selecting clothing that is futuristic and edgy, such as leather jackets, neon-colored accessories, and tech-inspired patterns. Add accessories like goggles, cybernetic implants, and LED lights to enhance the cyberpunk vibe. Use makeup and body paint to create a futuristic look, such as metallic skin or neon makeup. Consider adding functional elements to your costume, such as a built-in backpack or hidden pockets for your tech gadgets. Finally, practice your confident walk and embrace your inner cyberpunk for a memorable and immersive costume experience. example_title: Cyberpunk costume design - text: >- Start by creating a base terrain with mountains, valleys, and other natural features. Use fractal noise and displacement mapping to add texture and detail to the terrain, and experiment with different materials like rock, grass, and water. Add surreal elements like floating islands, giant mushrooms, or impossible geometry to create a dreamlike atmosphere. Use lighting and color grading to enhance the mood and tone of the scene, and render the final image at a high resolution for maximum impact. Share your surreal landscape with the world and inspire others to explore the possibilities of 3D art. example_title: Surreal 3D landscape creation - text: >- Start by setting a realistic goal and creating a training plan. Build up your mileage gradually over time, and incorporate cross-training and strength exercises to prevent injury and improve endurance. Be sure to stay hydrated and properly fuel your body with nutritious foods. Listen to your body and adjust your training as needed to avoid overexertion or burnout. Finally, taper your training in the weeks leading up to the race to give your body time to rest and recover before the big day. 
example_title: Marathon training inference: parameters: max_length: 96 num_beams: 4 early_stopping: true datasets: - pszemraj/fleece2instructions-inputs-alpaca-cleaned language: - en pipeline_tag: text2text-generation library_name: transformers --- # bart-base-instructiongen-w-inputs Use this text2text model to find out what LLM `instruction` (**and** `inputs` if relevant) might have generated `<arbitrary input text>`! - Check out a [basic demo on Spaces](https://huggingface.co/spaces/pszemraj/generate-instructions) - An example of how to use instructiongen models in a CLI script can be found [here](https://gist.github.com/pszemraj/8b0213e700763106074d3ac15d041c14) - You can find other models fine-tuned for instruction generation by [searching for the instructiongen tag](https://huggingface.co/models?other=instructiongen) ## about This model is a fine-tuned version of [facebook/bart-base](https://huggingface.co/facebook/bart-base) on the `pszemraj/fleece2instructions-inputs-alpaca-cleaned` dataset. It achieves the following results on the evaluation set: - Loss: 0.9579 - Rouge1: 62.3604 - Rouge2: 39.5109 - Rougel: 58.8843 - Rougelsum: 60.4494 - Gen Len: 24.9917 ## Example ![base](https://i.imgur.com/1Vq5Fys.png) ## Intended uses & limitations This model is intended to be used to generate instructions from arbitrary text. You can then use these instructions + your data to fine-tune an LLM on instructions w.r.t. a specific domain. This model is primarily intended to enable **low-resource domain adaptation**, rather than "_I want to generate even better prompts for the FLAN-V2 dataset!_". The `fleece2instructions-inputs-alpaca-cleaned` dataset, obtained from the [alpaca-lora repo](https://github.com/tloen/alpaca-lora) under the ODC-BY license, has been converted to a text2text format for use with language models. In this dataset, the original 'inputs' and 'instructions' columns are combined into a single 'instructions_inputs' column. To clearly separate the two types of content, each piece of text is prefixed with either an `<instruction>` or `<inputs>` token. These tokens not only facilitate model comprehension, but also allow for easy regex separation of model outputs during inference. As such, users can expect the output of this model to be similarly structured with `<instruction>` and `<inputs>` tokens. This is just the base model, for better performance (but slower/compute intensive) see the [bart-large](https://huggingface.co/pszemraj/bart-large-instructiongen-w-inputs) version. Further exploration/data may lead to even better models! 
## Training and evaluation data Refer to the [fleece2instructions-inputs-alpaca-cleaned](https://huggingface.co/datasets/pszemraj/fleece2instructions-inputs-alpaca-cleaned) dataset ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 8e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - distributed_type: multi-GPU - gradient_accumulation_steps: 16 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.02 - num_epochs: 2.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 1.1147 | 1.0 | 680 | 0.9901 | 61.8451 | 38.8293 | 58.3372 | 59.8658 | 25.2401 | | 0.9565 | 2.0 | 1360 | 0.9579 | 62.3604 | 39.5109 | 58.8843 | 60.4494 | 24.9917 |
Davlan/xlm-roberta-base-finetuned-yoruba
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- license: other --- Models in this repository are released under the [Fair AI Public License 1.0-SD](https://freedevproject.org/faipl-1.0-sd/).
Davlan/xlm-roberta-base-masakhaner
[ "pytorch", "xlm-roberta", "token-classification", "arxiv:2103.11811", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "XLMRobertaForTokenClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1248.89 +/- 21.54 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the repo id and checkpoint filename below are placeholders for this repository's actual files:
```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Placeholder repo id and filename -- replace them with this repository's actual values
checkpoint = load_from_hub(repo_id="<user>/a2c-AntBulletEnv-v0", filename="a2c-AntBulletEnv-v0.zip")
model = A2C.load(checkpoint)
```
Davlan/xlm-roberta-base-ner-hrl
[ "pytorch", "xlm-roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "XLMRobertaForTokenClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
760
null
# Vocabulary Trimmed [google/mt5-small](https://huggingface.co/google/mt5-small): `vocabtrimmer/mt5-small-trimmed-en` This model is a trimmed version of [google/mt5-small](https://huggingface.co/google/mt5-small) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming the vocabulary of language models to compress the model size. The following table shows a summary of the trimming process. | | google/mt5-small | vocabtrimmer/mt5-small-trimmed-en | |:---------------------------|:-------------------|:------------------------------------| | parameter_size_full | 300,176,768 | 258,413,952 | | parameter_size_embedding | 256,114,688 | 214,351,872 | | vocab_size | 250,112 | 209,328 | | compression_rate_full | 100.0 | 86.09 | | compression_rate_embedding | 100.0 | 83.69 | The following table shows the parameters used to trim the vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|:--------------------|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | | 2 |
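Because trimming only shrinks the embedding matrix and vocabulary, the checkpoint loads like any other mT5 model; a minimal loading sketch (no fine-tuning is implied, so this is simply a smaller drop-in replacement for `google/mt5-small`):

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Drop-in replacement for google/mt5-small with a trimmed (English-only) vocabulary
tokenizer = AutoTokenizer.from_pretrained("vocabtrimmer/mt5-small-trimmed-en")
model = AutoModelForSeq2SeqLM.from_pretrained("vocabtrimmer/mt5-small-trimmed-en")

# Should report the trimmed vocabulary size from the table above (209,328 vs. 250,112)
print(model.config.vocab_size)
```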
Dawn576/Dawn
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-29T02:56:09Z
--- language: - id pipeline_tag: text-generation # library_name: transformers --- # About : This πŸ¦™ Llama model was trained on a translated Alpaca dataset in Bahasa Indonesia. It uses Parameter Efficient Fine Tuning and LoRA to enable training on consumer-grade GPU hardware. # How to Use : ## Load the πŸ¦™ Alpaca-LoRA model ```python import torch import bitsandbytes as bnb from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig from peft import PeftModel, PeftConfig, prepare_model_for_int8_training, LoraConfig, get_peft_model peft_model_id = "firqaaa/indo-Alpaca-LoRA-7b" tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf") model = LlamaForCausalLM.from_pretrained("decapoda-research/llama-7b-hf", load_in_8bit=True, device_map="auto") # Load the LoRA model model = PeftModel.from_pretrained(model, peft_model_id) ``` ## Prompt Template Prepare the prompt template ```python def generate_prompt(instruction, input=None): if input: return f"""Berikut ini adalah petunjuk yang menjelaskan tugas, serta masukan yang menyediakan konteks tambahan. Tulis balasan yang melengkapi permintaan dengan tepat. Petunjuk: {instruction} Masukan: {input} Output:""" else: return f"""Berikut ini terdapat panduan yang menjelaskan tugas. Mohon tuliskan balasan yang melengkapi permintaan dengan tepat. Panduan: {instruction} Output:""" ``` ## Evaluation feel free to change the parameters inside `GenerationConfig` to get better result. ```python generation_config = GenerationConfig( temperature=0.2, top_p=0.75, num_beams=8 ) def evaluate(instruction, input=None): prompt = generate_prompt(instruction, input) inputs = tokenizer(prompt, return_tensors="pt") input_ids = inputs["input_ids"].cuda() generation_output = model.generate( input_ids=input_ids, generation_config=generation_config, return_dict_in_generate=True, output_scores=True, max_new_tokens=256 ) for s in generation_output.sequences: output = tokenizer.decode(s) print("Output:", output.split("Output:")[1].strip()) # input your question/instruction evaluate(input("Petunjuk: ")) ``` ## Note : Due to the high loss and lack of compute unit, we will update this model frequently to ensure the quality of generated text
Daymarebait/Discord_BOT_RICK
[ "conversational" ]
conversational
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: mit tags: - generated_from_trainer metrics: - f1 model-index: - name: activelearning-sentiment-model-using-steam-data results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # activelearning-sentiment-model-using-steam-data This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.2861 - Accuracy: 0.8470 - F1: 0.8467 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1 - Datasets 2.10.1 - Tokenizers 0.13.2
Dbluciferm3737/Idk
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue model-index: - name: bert-fine-tuned-cola results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-fine-tuned-cola This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.8369 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 459 | 0.4187 | | 0.5148 | 2.0 | 918 | 0.5389 | | 0.3202 | 3.0 | 1377 | 0.6432 | | 0.1684 | 4.0 | 1836 | 0.7600 | | 0.101 | 5.0 | 2295 | 0.8369 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
Ddarkros/Test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1768.53 +/- 142.09 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the repo id and checkpoint filename below are placeholders for this repository's actual files:
```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Placeholder repo id and filename -- replace them with this repository's actual values
checkpoint = load_from_hub(repo_id="<user>/a2c-AntBulletEnv-v0", filename="a2c-AntBulletEnv-v0.zip")
model = A2C.load(checkpoint)
```
DeadBeast/korscm-mBERT
[ "pytorch", "bert", "text-classification", "korean", "dataset:Korean-Sarcasm", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
43
null
--- license: mit --- This repo contains a low-rank adapter for LLaMA-7b, fit on the Stanford Alpaca dataset and [based on this repo](https://github.com/tloen/alpaca-lora).
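A minimal sketch of how such an adapter is typically attached with PEFT; the adapter repo id is a placeholder for wherever this adapter is hosted, and the base-model path follows the linked alpaca-lora repo:

```python
import torch
from peft import PeftModel
from transformers import LlamaForCausalLM, LlamaTokenizer

base_model_id = "decapoda-research/llama-7b-hf"  # base LLaMA-7b weights used by the alpaca-lora repo
adapter_repo_id = "<user>/<this-adapter-repo>"   # placeholder for this adapter's Hub repo

tokenizer = LlamaTokenizer.from_pretrained(base_model_id)
model = LlamaForCausalLM.from_pretrained(base_model_id, torch_dtype=torch.float16, device_map="auto")

# Attach the low-rank adapter on top of the frozen base model
model = PeftModel.from_pretrained(model, adapter_repo_id)
```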
DeadBeast/mbert-base-cased-finetuned-bengali-fakenews
[ "pytorch", "bert", "text-classification", "bengali", "dataset:BanFakeNews", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
37
2023-03-29T03:26:18Z
--- tags: - generated_from_trainer datasets: - samsum model-index: - name: pegasus-samsum results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-samsum This model is a fine-tuned version of [google/pegasus-cnn_dailymail](https://huggingface.co/google/pegasus-cnn_dailymail) on the samsum dataset. It achieves the following results on the evaluation set: - Loss: 1.4812 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.6928 | 0.54 | 500 | 1.4812 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
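A minimal summarization sketch; the repo id below is a placeholder for this repository, and the dialogue is an arbitrary example in the SAMSum style:

```python
from transformers import pipeline

# Placeholder repo id -- point this at the repository this card belongs to
summarizer = pipeline("summarization", model="<user>/pegasus-samsum")

dialogue = (
    "Anna: Are we still on for dinner tonight?\n"
    "Ben: Yes! 7 pm at the usual place.\n"
    "Anna: Perfect, see you there."
)
print(summarizer(dialogue, max_length=60, min_length=10)[0]["summary_text"])
```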
Dean/summarsiation
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1636311878275063814/mAnmCXzQ_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">πŸ€– AI BOT πŸ€–</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Ai-oh πŸ’«</div> <div style="text-align: center; font-size: 14px;">@sansansansaname</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Ai-oh πŸ’«. | Data | Ai-oh πŸ’« | | --- | --- | | Tweets downloaded | 1999 | | Retweets | 24 | | Short tweets | 1306 | | Tweets kept | 669 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/c2qx733w/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @sansansansaname's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/5b93euwo) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/5b93euwo/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/sansansansaname') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
DecafNosebleed/DialoGPT-small-ScaraBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
null
Access to model liyin-ttsa/alpaca-lora-65b is restricted and you are not in the authorized list. Visit https://huggingface.co/liyin-ttsa/alpaca-lora-65b to ask for access.
DecafNosebleed/scarabot-model
[ "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- library_name: stable-baselines3 tags: - PandaReachDense-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: PandaReachDense-v2 type: PandaReachDense-v2 metrics: - type: mean_reward value: -1.10 +/- 0.50 name: mean_reward verified: false --- # **A2C** Agent playing **PandaReachDense-v2** This is a trained model of an **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the repo id and checkpoint filename below are placeholders for this repository's actual files:
```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Placeholder repo id and filename -- replace them with this repository's actual values
checkpoint = load_from_hub(repo_id="<user>/a2c-PandaReachDense-v2", filename="a2c-PandaReachDense-v2.zip")
model = A2C.load(checkpoint)
```
Declan/Breitbart_model_v1
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: srimoyee12/my_awesome_model results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # srimoyee12/my_awesome_model This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the [Auditor Review Dataset](https://huggingface.co/datasets/demo-org/auditor_review). It achieves the following results on the evaluation set: - Train Loss: 0.1735 - Validation Loss: 0.3834 - Train Accuracy: 0.8524 - Epoch: 3 ## Model description This is a simple classifier model based on DistilBERT. It classifies given data into Negative, Neutral or Positive based on the sentiment. ## Intended uses & limitations Can be used for text classification. This is created for illustration purposes and might not have the highest accuracy. ## Training and evaluation data Default split from the [dataset card](https://huggingface.co/datasets/demo-org/auditor_review) ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 1210, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 0.5919 | 0.4004 | 0.8359 | 0 | | 0.2881 | 0.3590 | 0.8473 | 1 | | 0.1735 | 0.3834 | 0.8524 | 2 | ### Framework versions - Transformers 4.27.3 - TensorFlow 2.11.0 - Datasets 2.10.1 - Tokenizers 0.13.2
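A minimal classification sketch, assuming the checkpoint is published under the name given in the card metadata (`srimoyee12/my_awesome_model`); the index-to-label mapping is an assumption and should be checked against `model.config.id2label`:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

model_id = "srimoyee12/my_awesome_model"  # taken from the card metadata above
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = TFAutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("Revenue grew strongly across all business segments.", return_tensors="tf")
logits = model(**inputs).logits
pred = int(tf.argmax(logits, axis=-1)[0])

# The card describes Negative / Neutral / Positive classes; verify the actual mapping in the config
print(pred, model.config.id2label.get(pred, pred))
```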
Declan/Breitbart_model_v3
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: en thumbnail: http://www.huggingtweets.com/hutaosoulmate/1680061774875/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1539749115092934656/WeP6cOjo_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">πŸ€– AI BOT πŸ€–</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Laurent</div> <div style="text-align: center; font-size: 14px;">@hutaosoulmate</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Laurent. | Data | Laurent | | --- | --- | | Tweets downloaded | 1181 | | Retweets | 212 | | Short tweets | 101 | | Tweets kept | 868 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/y718bopk/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @hutaosoulmate's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/vz2s932i) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/vz2s932i/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/hutaosoulmate') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Declan/Breitbart_model_v6
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: - en license: apache-2.0 tags: - hf-asr-leaderboard - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 model-index: - name: whisper-small-en results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-small-en This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Common Voice 11.0 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 4000 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
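A minimal transcription sketch; the repo id and the audio path are placeholders:

```python
from transformers import pipeline

# Placeholder repo id -- point this at the repository this card belongs to
asr = pipeline("automatic-speech-recognition", model="<user>/whisper-small-en")

# Any English speech recording works here; "sample.wav" is a placeholder path
print(asr("sample.wav")["text"])
```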
Declan/CNN_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy - f1 model-index: - name: finetuning-sentiment-model-3000-samples results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb config: plain_text split: test args: plain_text metrics: - name: Accuracy type: accuracy value: 0.8633333333333333 - name: F1 type: f1 value: 0.8664495114006515 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.3321 - Accuracy: 0.8633 - F1: 0.8664 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
Declan/CNN_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 554.50 +/- 116.07 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga Niraya666 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga Niraya666 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga Niraya666 ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 50000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
Declan/test_model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: Regression_albert_8 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Regression_albert_8 This model is a fine-tuned version of [albert-base-v2](https://huggingface.co/albert-base-v2) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0710 - Mse: 0.0710 - Mae: 0.1978 - R2: 0.0202 - Accuracy: 0.9259 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Mse | Mae | R2 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:--------:| | No log | 1.0 | 49 | 0.0777 | 0.0777 | 0.2323 | 0.2804 | 0.9464 | | No log | 2.0 | 98 | 0.0649 | 0.0649 | 0.2176 | 0.3990 | 0.9464 | | No log | 3.0 | 147 | 0.0885 | 0.0885 | 0.2354 | 0.1799 | 0.8571 | | No log | 4.0 | 196 | 0.0620 | 0.0620 | 0.1971 | 0.4252 | 0.9643 | | No log | 5.0 | 245 | 0.0605 | 0.0605 | 0.2071 | 0.4394 | 0.9821 | | No log | 6.0 | 294 | 0.0523 | 0.0523 | 0.1714 | 0.5155 | 0.9821 | | No log | 7.0 | 343 | 0.1047 | 0.1047 | 0.2598 | 0.0301 | 0.8393 | | No log | 8.0 | 392 | 0.0421 | 0.0421 | 0.1543 | 0.6103 | 0.9643 | | No log | 9.0 | 441 | 0.0445 | 0.0445 | 0.1612 | 0.5875 | 0.9643 | | No log | 10.0 | 490 | 0.0438 | 0.0438 | 0.1608 | 0.5939 | 0.9821 | | 0.0478 | 11.0 | 539 | 0.0529 | 0.0529 | 0.1816 | 0.5095 | 0.9464 | | 0.0478 | 12.0 | 588 | 0.0401 | 0.0401 | 0.1495 | 0.6288 | 0.9643 | | 0.0478 | 13.0 | 637 | 0.0471 | 0.0471 | 0.1637 | 0.5639 | 0.9643 | | 0.0478 | 14.0 | 686 | 0.0454 | 0.0454 | 0.1632 | 0.5797 | 0.9643 | | 0.0478 | 15.0 | 735 | 0.0436 | 0.0436 | 0.1526 | 0.5957 | 0.9643 | | 0.0478 | 16.0 | 784 | 0.0520 | 0.0520 | 0.1764 | 0.5178 | 0.9643 | | 0.0478 | 17.0 | 833 | 0.0414 | 0.0414 | 0.1536 | 0.6166 | 0.9821 | | 0.0478 | 18.0 | 882 | 0.0413 | 0.0413 | 0.1490 | 0.6176 | 0.9643 | | 0.0478 | 19.0 | 931 | 0.0413 | 0.0413 | 0.1514 | 0.6174 | 0.9821 | | 0.0478 | 20.0 | 980 | 0.0429 | 0.0429 | 0.1537 | 0.6023 | 0.9821 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
Digakive/Hsgshs
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Vocabulary Trimmed [google/mt5-small](https://huggingface.co/google/mt5-small): `vocabtrimmer/mt5-small-trimmed-en-30000` This model is a trimmed version of [google/mt5-small](https://huggingface.co/google/mt5-small) by [`vocabtrimmer`](https://github.com/asahi417/lm-vocab-trimmer), a tool for trimming the vocabulary of language models to compress the model size. The following table shows a summary of the trimming process. | | google/mt5-small | vocabtrimmer/mt5-small-trimmed-en-30000 | |:---------------------------|:-------------------|:------------------------------------------| | parameter_size_full | 300,176,768 | 74,783,104 | | parameter_size_embedding | 256,114,688 | 30,721,024 | | vocab_size | 250,112 | 30,001 | | compression_rate_full | 100.0 | 24.91 | | compression_rate_embedding | 100.0 | 12.0 | The following table shows the parameters used to trim the vocabulary. | language | dataset | dataset_column | dataset_name | dataset_split | target_vocab_size | min_frequency | |:-----------|:----------------------------|:-----------------|:---------------|:----------------|--------------------:|----------------:| | en | vocabtrimmer/mc4_validation | text | en | validation | 30000 | 2 |
albert-base-v2
[ "pytorch", "tf", "jax", "rust", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4,785,283
2023-03-29T09:15:36Z
--- license: apache-2.0 language: - en library_name: spacy pipeline_tag: text-classification tags: - medical datasets: - Anthropic/hh-rlhf --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1). ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Data Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. 
--> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
albert-large-v1
[ "pytorch", "tf", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
687
2023-03-29T09:16:01Z
--- license: mit tags: - generated_from_trainer model-index: - name: gpt2-synth-real results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gpt2-synth-real This model is a fine-tuned version of [gpt2-medium](https://huggingface.co/gpt2-medium) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.4267 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 4 - seed: 21 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 80.0811 | 0.01 | 10 | 76.0005 | | 63.2867 | 0.02 | 20 | 55.5177 | | 30.7747 | 0.03 | 30 | 19.9807 | | 17.8136 | 0.04 | 40 | 12.4432 | | 6.5809 | 0.05 | 50 | 5.7868 | | 3.8801 | 0.06 | 60 | 3.3238 | | 2.2807 | 0.07 | 70 | 1.8328 | | 1.3763 | 0.08 | 80 | 1.1114 | | 0.8658 | 0.09 | 90 | 1.0464 | | 0.748 | 0.1 | 100 | 0.8199 | | 0.5694 | 0.12 | 110 | 0.5984 | | 0.6427 | 0.13 | 120 | 0.6168 | | 0.5534 | 0.14 | 130 | 0.5981 | | 0.5483 | 0.15 | 140 | 0.5450 | | 0.5384 | 0.16 | 150 | 0.4926 | | 0.4926 | 0.17 | 160 | 0.4924 | | 0.5059 | 0.18 | 170 | 0.4762 | | 0.4372 | 0.19 | 180 | 0.4733 | | 0.4833 | 0.2 | 190 | 0.4785 | | 0.4511 | 0.21 | 200 | 0.4511 | | 0.4163 | 0.22 | 210 | 0.4534 | | 0.4849 | 0.23 | 220 | 0.4420 | | 0.4857 | 0.24 | 230 | 0.4421 | | 0.6163 | 0.25 | 240 | 0.4336 | | 0.5151 | 0.26 | 250 | 0.4344 | | 0.4533 | 0.27 | 260 | 0.4280 | | 0.3812 | 0.28 | 270 | 0.4387 | | 0.475 | 0.29 | 280 | 0.4274 | | 0.4354 | 0.3 | 290 | 0.4272 | | 0.3912 | 0.31 | 300 | 0.4305 | | 0.3944 | 0.32 | 310 | 0.4204 | | 0.4166 | 0.33 | 320 | 0.4239 | | 0.3677 | 0.35 | 330 | 0.4246 | | 0.4135 | 0.36 | 340 | 0.4267 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.11.0+cu113 - Datasets 2.6.1 - Tokenizers 0.12.1
albert-large-v2
[ "pytorch", "tf", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26,792
2023-03-29T09:16:38Z
--- metrics: - perplexity --- # RoBERTa-based model Trained on 10% of the Python code snippets in the [codesearchnet](https://github.com/github/CodeSearchNet) dataset for code variable name suggestion. Use the 'fill-mask' inference pipeline to suggest variable names.
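A minimal sketch of that fill-mask usage; the repo id is a placeholder for this repository, and the snippet assumes RoBERTa's standard `<mask>` token:

```python
from transformers import pipeline

# Placeholder repo id -- point this at the repository this card belongs to
fill = pipeline("fill-mask", model="<user>/roberta-python-variable-names")

# Mask the variable name and let the model propose candidates
for prediction in fill("for <mask> in range(len(items)):"):
    print(prediction["token_str"], round(prediction["score"], 3))
```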
bert-base-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8,621,271
2023-03-29T09:23:00Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage
```python
import gym

# `load_from_hub` is assumed to be the helper from the Deep RL Course notebook,
# which downloads and unpickles the saved Q-table dictionary from the Hub.
model = load_from_hub(repo_id="AymenKallala/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
bert-base-chinese
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "zh", "arxiv:1810.04805", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3,377,486
2023-03-29T09:23:40Z
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-Pixelcopter-PLE-v0 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 50.40 +/- 43.40 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
bert-base-german-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "de", "transformers", "exbert", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
175,983
2023-03-29T09:23:51Z
--- license: apache-2.0 library_name: adapter-transformers tags: - medical --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1). ## Model Details https://www.google.com/url?sa=i&url=https%3A%2F%2Fwww.projectpro.io%2Farticle%2Fheart-disease-prediction-using-machine-learning-project%2F615&psig=AOvVaw2-NYaLblNc8lJAuMPJm8tf&ust=1680169242474000&source=images&cd=vfe&ved=0CBAQjRxqFwoTCJCCoajsgP4CFQAAAAAdAAAAABAJ ### Model Description <!-- Provide a longer summary of what this model is. --> 👇👇👇👇👇👇👇👇👇👇👇👇👇👇👇👇👇👇👇👇👇👇👇👇👇👇👇👇 For more info, view our page 😊😊 https://colab.research.google.com/drive/1bdFtsrVyYwbXH_jH6yJR3QEC6UQiguUK#scrollTo=emwnJJVwAupA 👆👆👆👆👆👆👆👆👆👆👆👆👆👆👆👆👆👆👆👆👆👆👆👆👆👆👆👆 😆😆😆😆😆😆😆😆😆😆😆😆😆😆😆 - **Developed by:** CH SAI ASHOK - **Model type:** Disease Prediction through Symptoms - **Language(s) (NLP):** Text Classification - **License:** Apache-2.0 - **Finetuned from model [optional]:** NLP DISEASE PREDICTION ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. 
--> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Data Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
bert-base-german-dbmdz-cased
[ "pytorch", "jax", "bert", "fill-mask", "de", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,814
2023-03-29T09:23:52Z
--- license: apache-2.0 tags: - medical language: - en library_name: spacy pipeline_tag: text-classification --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://www.cricbuzz.com/). ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Data Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
bert-base-multilingual-uncased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "multilingual", "af", "sq", "ar", "an", "hy", "ast", "az", "ba", "eu", "bar", "be", "bn", "inc", "bs", "br", "bg", "my", "ca", "ceb", "ce", "zh", "cv", "hr", "cs", "da", "nl", "en", "et", "fi", "fr", "gl", "ka", "de", "el", "gu", "ht", "he", "hi", "hu", "is", "io", "id", "ga", "it", "ja", "jv", "kn", "kk", "ky", "ko", "la", "lv", "lt", "roa", "nds", "lm", "mk", "mg", "ms", "ml", "mr", "min", "ne", "new", "nb", "nn", "oc", "fa", "pms", "pl", "pt", "pa", "ro", "ru", "sco", "sr", "scn", "sk", "sl", "aze", "es", "su", "sw", "sv", "tl", "tg", "ta", "tt", "te", "tr", "uk", "ud", "uz", "vi", "vo", "war", "cy", "fry", "pnb", "yo", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
328,585
null
--- license: apache-2.0 language: - en library_name: spacy pipeline_tag: text-classification tags: - medical --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1). ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Data Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. 
--> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
camembert-base
[ "pytorch", "tf", "safetensors", "camembert", "fill-mask", "fr", "dataset:oscar", "arxiv:1911.03894", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "CamembertForMaskedLM" ], "model_type": "camembert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,440,898
null
import pandas as pd
import re
import spacy
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score, classification_report
from sklearn.linear_model import LogisticRegression

# Load the data
data = pd.read_csv('symptomssingle.csv')

# Check for any missing values and remove them
data = data.dropna()

# Define a function to separate symptoms and diseases from the text
def separate_symptoms_and_diseases(text):
    symptoms = re.findall(r'{"symptoms":"(.*?)"}', text)
    disease = re.sub(r'(?:{"symptoms":".*?"},?)+', '', text).strip()
    disease = disease.replace('],', '').strip()  # Remove '],' from the disease name
    return symptoms, disease

# Apply the function to the data
data['symptoms_and_diseases'] = data['data'].apply(separate_symptoms_and_diseases)
data[['symptoms', 'disease']] = pd.DataFrame(data['symptoms_and_diseases'].tolist(), index=data.index)
data = data.drop(columns=['data', 'symptoms_and_diseases'])

# Load the spaCy model
nlp = spacy.load('en_core_web_sm')

# Preprocessing function
def preprocess(symptoms):
    processed_symptoms = []
    for symptom in symptoms:
        doc = nlp(symptom)
        processed_symptom = ' '.join(token.lemma_.lower() for token in doc if not token.is_stop and token.is_alpha)
        processed_symptoms.append(processed_symptom)
    return ' '.join(processed_symptoms)

# Preprocess the symptoms column
data['symptoms_preprocessed'] = data['symptoms'].apply(preprocess)

# Split the data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(data['symptoms_preprocessed'], data['disease'], test_size=0.2, random_state=42)

# Create a pipeline for text classification
pipeline = Pipeline([
    ('tfidf', TfidfVectorizer(ngram_range=(1, 2))),
    ('classifier', LogisticRegression(solver='liblinear', C=10))
])

# Train the model
pipeline.fit(X_train, y_train)

# Make predictions
y_pred = pipeline.predict(X_test)

# Evaluate the model
print("Accuracy: ", accuracy_score(y_test, y_pred))
print("Classification Report:\n", classification_report(y_test, y_pred))
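A possible way to query the trained pipeline for an unseen case; the symptom strings below are invented for illustration, and `en_core_web_sm` must be installed beforehand (e.g. `python -m spacy download en_core_web_sm`):

```python
# Invented symptoms for a hypothetical patient.
new_symptoms = ["persistent cough", "high fever", "shortness of breath"]

# Reuse the exact preprocessing applied at training time.
processed = preprocess(new_symptoms)

# The fitted sklearn Pipeline expects an iterable of documents.
print("Predicted disease:", pipeline.predict([processed])[0])

# Ranked top-3 candidates, since LogisticRegression exposes predict_proba.
probs = pipeline.predict_proba([processed])[0]
top3 = sorted(zip(pipeline.classes_, probs), key=lambda pair: pair[1], reverse=True)[:3]
print("Top candidates:", top3)
```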
ctrl
[ "pytorch", "tf", "ctrl", "en", "arxiv:1909.05858", "arxiv:1910.09700", "transformers", "license:bsd-3-clause", "has_space" ]
null
{ "architectures": null, "model_type": "ctrl", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
17,007
2023-03-29T09:32:08Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: AutonomousTaxi results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.50 +/- 2.73 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="AymenKallala/AutonomousTaxi", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
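To reproduce a mean-reward style evaluation locally, one hedged sketch (the `"qtable"` key and Gymnasium's `step` signature are again assumptions about the pickled dict, not verified details of this repo):

```python
import gymnasium as gym
import numpy as np

env = gym.make(model["env_id"])
returns = []
for _ in range(100):  # number of evaluation episodes is arbitrary here
    state, info = env.reset()
    done, episode_return = False, 0.0
    while not done:
        action = int(np.argmax(model["qtable"][state]))  # greedy policy
        state, reward, terminated, truncated, info = env.step(action)
        episode_return += reward
        done = terminated or truncated
    returns.append(episode_return)
print(f"mean reward: {np.mean(returns):.2f} +/- {np.std(returns):.2f}")
```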
distilbert-base-cased-distilled-squad
[ "pytorch", "tf", "rust", "safetensors", "openvino", "distilbert", "question-answering", "en", "dataset:squad", "arxiv:1910.01108", "arxiv:1910.09700", "transformers", "license:apache-2.0", "model-index", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
257,745
2023-03-29T09:32:10Z
--- license: creativeml-openrail-m library_name: diffusers pipeline_tag: text-to-image ---