| Column | Type | Values |
|---|---|---|
| id | string | lengths 2–115 |
| author | string | lengths 2–42 |
| last_modified | timestamp[us, tz=UTC] | |
| downloads | int64 | 0–8.87M |
| likes | int64 | 0–3.84k |
| paperswithcode_id | string | lengths 2–45 |
| tags | list | |
| lastModified | timestamp[us, tz=UTC] | |
| createdAt | string | lengths 24–24 |
| key | string (classes) | 1 value |
| created | timestamp[us] | |
| card | string | lengths 1–1.01M |
| embedding | list | |
| library_name | string (classes) | 21 values |
| pipeline_tag | string (classes) | 27 values |
| mask_token | null | |
| card_data | null | |
| widget_data | null | |
| model_index | null | |
| config | null | |
| transformers_info | null | |
| spaces | null | |
| safetensors | null | |
| transformersInfo | null | |
| modelId | string | lengths 5–111 |
| embeddings | list | |
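The rows that follow are easier to query programmatically than to read raw. Below is a minimal sketch, assuming a local Parquet copy of this export; the file name `dataset_metadata.parquet` and the filter threshold are illustrative assumptions, not part of the export:

```python
import pandas as pd

# Hypothetical local copy of this metadata export; the actual file name may differ.
df = pd.read_parquet("dataset_metadata.parquet")

# In this export, columns such as mask_token, card_data, and config are entirely null,
# so keep only the populated ones.
populated = ["id", "author", "last_modified", "downloads", "likes",
             "tags", "createdAt", "created", "card"]
df = df[populated]

# Example query: rows whose card is more than a bare license header or "Entry not found".
substantial = df[df["card"].fillna("").str.len() > 100]
print(substantial[["id", "author", "likes"]].head())
```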
id: Chapad0o/Ursos_sem_curso | author: Chapad0o | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "license:openrail", "region:us" ]
last_modified: 2023-10-26T12:26:38Z | lastModified: 2023-10-26T12:26:38Z | createdAt: 2023-10-26T12:23:48.000Z | created: 2023-10-26T12:23:48
card:
--- license: openrail ---
[ -0.12853392958641052, -0.18616779148578644, 0.6529127955436707, 0.49436280131340027, -0.19319361448287964, 0.23607419431209564, 0.36072003841400146, 0.050563063472509384, 0.579365611076355, 0.7400140762329102, -0.6508104205131531, -0.23783954977989197, -0.7102249264717102, -0.0478260256350...
(remaining columns: null)

id: faizalnf1800/yelp_review_full_preprocessed | author: faizalnf1800 | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "license:afl-3.0", "region:us" ]
last_modified: 2023-10-26T13:18:27Z | lastModified: 2023-10-26T13:18:27Z | createdAt: 2023-10-26T12:42:41.000Z | created: 2023-10-26T12:42:41
card:
--- license: afl-3.0 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* dataset_info: features: - name: text dtype: string - name: label dtype: int64 splits: - name: train num_bytes: 281739751 num_examples: 650000 - name: test num_bytes: 21706191 num_examples: 50000 download_size: 174987714 dataset_size: 303445942 --- This dataset was preprocessed using the NLTK library: tokenized, lemmatized, lowercased, and stopword-filtered so that it is easier to analyze with an ML model.
[ -0.35586586594581604, -0.7098191380500793, -0.23544132709503174, 0.18372268974781036, -0.4811056852340698, -0.1558704674243927, -0.20729345083236694, -0.16694562137126923, 0.33462950587272644, 0.7907671928405762, -0.890204668045044, -0.7002178430557251, -0.6342523097991943, 0.2736199200153...
(remaining columns: null)

id: Ryan20/hotel_dataset_pushed | author: Ryan20 | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-27T08:50:48Z | lastModified: 2023-10-27T08:50:48Z | createdAt: 2023-10-26T12:50:32.000Z | created: 2023-10-26T12:50:32
card:
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: answers sequence: string - name: context dtype: string - name: questions sequence: string splits: - name: train num_bytes: 4634 num_examples: 7 download_size: 7932 dataset_size: 4634 --- # Dataset Card for "hotel_dataset_pushed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.4043380618095398, -0.3408950865268707, 0.1660793125629425, 0.28546011447906494, -0.1467149555683136, -0.03285680338740349, 0.07211398333311081, 0.07085800170898438, 0.7693894505500793, 0.685884416103363, -0.8523958921432495, -0.7578244209289551, -0.31216561794281006, -0.2798101603984833...
(remaining columns: null)

id: Back-up/UIT-Squad-test | author: Back-up | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T13:11:07Z | lastModified: 2023-10-26T13:11:07Z | createdAt: 2023-10-26T13:11:05.000Z | created: 2023-10-26T13:11:05
card:
--- dataset_info: features: - name: id dtype: string - name: title dtype: string - name: context dtype: string - name: question dtype: string - name: answers struct: - name: answer_start sequence: int64 - name: text sequence: string - name: is_impossible dtype: bool splits: - name: train num_bytes: 4654487 num_examples: 3821 download_size: 613301 dataset_size: 4654487 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "UIT-Squad-test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.5997490882873535, -0.3594619631767273, -0.07103197276592255, 0.3142973482608795, -0.04880359023809433, 0.30136749148368835, 0.4138109087944031, -0.0489128902554512, 0.6015946865081787, 0.23268993198871613, -1.083938717842102, -0.4337072968482971, -0.24817372858524323, -0.076716959476470...
(remaining columns: null)

id: varshil27/Symtoms-Disease-LLama2-Format | author: varshil27 | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "license:other", "region:us" ]
last_modified: 2023-10-26T13:21:40Z | lastModified: 2023-10-26T13:21:40Z | createdAt: 2023-10-26T13:20:37.000Z | created: 2023-10-26T13:20:37
card:
--- license: other license_name: other license_link: LICENSE ---
[ -0.12853392958641052, -0.18616779148578644, 0.6529127955436707, 0.49436280131340027, -0.19319361448287964, 0.23607419431209564, 0.36072003841400146, 0.050563063472509384, 0.579365611076355, 0.7400140762329102, -0.6508104205131531, -0.23783954977989197, -0.7102249264717102, -0.0478260256350...
(remaining columns: null)

id: Facico/test | author: Facico | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-27T06:48:02Z | lastModified: 2023-10-27T06:48:02Z | createdAt: 2023-10-26T13:20:59.000Z | created: 2023-10-26T13:20:59
card:
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
(remaining columns: null)

id: othorizedshogun/poems_dataset | author: othorizedshogun | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T13:37:06Z | lastModified: 2023-10-26T13:37:06Z | createdAt: 2023-10-26T13:37:02.000Z | created: 2023-10-26T13:37:02
card:
--- dataset_info: features: - name: poem dtype: string - name: form dtype: string - name: topic dtype: string - name: input dtype: string splits: - name: train num_bytes: 38858200.040046886 num_examples: 18428 - name: test num_bytes: 4318514.959953116 num_examples: 2048 download_size: 28597631 dataset_size: 43176715.0 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* ---
[ -0.12853392958641052, -0.18616779148578644, 0.6529127955436707, 0.49436280131340027, -0.19319361448287964, 0.23607419431209564, 0.36072003841400146, 0.050563063472509384, 0.579365611076355, 0.7400140762329102, -0.6508104205131531, -0.23783954977989197, -0.7102249264717102, -0.0478260256350...
(remaining columns: null)

id: enakilci/fi_corpora_parliament_processed | author: enakilci | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T13:40:10Z | lastModified: 2023-10-26T13:40:10Z | createdAt: 2023-10-26T13:39:56.000Z | created: 2023-10-26T13:39:56
card:
--- dataset_info: features: - name: text dtype: string splits: - name: train num_bytes: 309048227 num_examples: 1969624 download_size: 175929435 dataset_size: 309048227 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "fi_corpora_parliament_processed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.551241934299469, -0.31164610385894775, 0.23627430200576782, 0.2866816818714142, -0.33184322714805603, 0.06764134764671326, -0.16546601057052612, -0.08656122535467148, 0.7321967482566833, 0.8201548457145691, -0.784290611743927, -0.7536885142326355, -0.6623625755310059, 0.0509160198271274...
(remaining columns: null)

id: JFaces/threads-squeez | author: JFaces | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T14:16:21Z | lastModified: 2023-10-26T14:16:21Z | createdAt: 2023-10-26T13:40:50.000Z | created: 2023-10-26T13:40:50
card:
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
(remaining columns: null)

id: carnival13/nci_nq_t5_tokenized | author: carnival13 | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T13:53:37Z | lastModified: 2023-10-26T13:53:37Z | createdAt: 2023-10-26T13:53:14.000Z | created: 2023-10-26T13:53:14
card:
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: eval path: data/eval-* - split: eval_zero_shot path: data/eval_zero_shot-* - split: eval_normal path: data/eval_normal-* dataset_info: features: - name: input dtype: string - name: label sequence: int64 - name: input_ids sequence: int32 - name: attention_mask sequence: int8 splits: - name: train num_bytes: 137430914 num_examples: 177638 - name: eval num_bytes: 1529607 num_examples: 7830 - name: eval_zero_shot num_bytes: 562161 num_examples: 2859 - name: eval_normal num_bytes: 967446 num_examples: 4971 download_size: 61636686 dataset_size: 140490128 --- # Dataset Card for "nci_nq_t5_tokenized" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.39435023069381714, 0.03341805562376976, 0.1455821990966797, 0.2434491515159607, -0.34493109583854675, 0.34667813777923584, 0.3600970208644867, -0.11407623440027237, 0.9320704936981201, 0.44368553161621094, -0.7157653570175171, -1.0284310579299927, -0.5076336860656738, 0.1537854224443435...
(remaining columns: null)

id: zion84006/tencent_data_encodec | author: zion84006 | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T21:43:04Z | lastModified: 2023-10-26T21:43:04Z | createdAt: 2023-10-26T14:18:56.000Z | created: 2023-10-26T14:18:56
card:
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: valid path: data/valid-* - split: test path: data/test-* dataset_info: features: - name: file_id dtype: int64 - name: instruction dtype: string - name: transcription dtype: string - name: src_encodec_0 sequence: int64 - name: src_encodec_1 sequence: int64 - name: src_encodec_2 sequence: int64 - name: src_encodec_3 sequence: int64 - name: src_encodec_4 sequence: int64 - name: src_encodec_5 sequence: int64 - name: src_encodec_6 sequence: int64 - name: src_encodec_7 sequence: int64 - name: tgt_encodec_0 sequence: int64 - name: tgt_encodec_1 sequence: int64 - name: tgt_encodec_2 sequence: int64 - name: tgt_encodec_3 sequence: int64 - name: tgt_encodec_4 sequence: int64 - name: tgt_encodec_5 sequence: int64 - name: tgt_encodec_6 sequence: int64 - name: tgt_encodec_7 sequence: int64 splits: - name: train num_bytes: 18586613483 num_examples: 266780 - name: valid num_bytes: 527894882 num_examples: 7620 - name: test num_bytes: 508453304 num_examples: 7620 download_size: 472185815 dataset_size: 19622961669 --- # Dataset Card for "tencent_data_encodec" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.6804223656654358, -0.2984616458415985, 0.31749454140663147, 0.3821054995059967, -0.10087330639362335, 0.01860017701983452, -0.03365352377295494, 0.057774074375629425, 0.9415125846862793, 0.5801095366477966, -0.6325408816337585, -0.8203985691070557, -0.4521246552467346, 0.046291392296552...
(remaining columns: null)

id: pglo/NeurIPS_llm_challenge | author: pglo | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T14:38:20Z | lastModified: 2023-10-26T14:38:20Z | createdAt: 2023-10-26T14:27:18.000Z | created: 2023-10-26T14:27:18
card:
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
(remaining columns: null)

id: emi429/humansleepproject-small-individuals | author: emi429 | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T18:18:10Z | lastModified: 2023-10-26T18:18:10Z | createdAt: 2023-10-26T14:31:15.000Z | created: 2023-10-26T14:31:15
card:
--- dataset_info: features: - name: rr_intervals dtype: int64 - name: sleep_stage dtype: int64 - name: patient_id dtype: int64 splits: - name: test num_bytes: 12096 num_examples: 504 - name: train num_bytes: 49680 num_examples: 2070 download_size: 47116 dataset_size: 61776 --- # Dataset Card for "humansleepproject-small-individuals" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.4903988242149353, -0.08434464782476425, 0.2487887144088745, 0.28870105743408203, -0.08080676198005676, 0.060692135244607925, 0.13745935261249542, -0.3124452233314514, 1.011404275894165, 0.38297560811042786, -0.8252649307250977, -0.5669659376144409, -0.376980185508728, -0.184855610132217...
(remaining columns: null)

id: yangcong/sd_config | author: yangcong | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T14:53:34Z | lastModified: 2023-10-26T14:53:34Z | createdAt: 2023-10-26T14:52:54.000Z | created: 2023-10-26T14:52:54
card:
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
(remaining columns: null)

id: JWBickel/StrongsChunked_English_Phrase_Counts | author: JWBickel | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "size_categories:10K<n<100K", "language:en", "region:us" ]
last_modified: 2023-10-26T15:53:22Z | lastModified: 2023-10-26T15:53:22Z | createdAt: 2023-10-26T15:14:50.000Z | created: 2023-10-26T15:14:50
card:
--- language: - en size_categories: - 10K<n<100K --- These are KJV phrases and their counts, chunked by Strong's. It's a CSV file, delimited by carets. ------------------------------------- RowID ^ StrongsChunkedPhrase ^ Count _____________________________________ Note that the first record is nonsense - it's just a space. Taking it out would have thrown off the Row IDs. Don't overlook it (but overlook my flaw).
[ 0.10667991638183594, -0.895297110080719, 0.3595011532306671, 0.5932682156562805, -0.9295505285263062, -0.009096124209463596, 0.14134889841079712, 0.08839044719934464, 0.7608322501182556, 0.839921772480011, -0.1517317146062851, -0.7249847650527954, -0.6706812381744385, 0.17194817960262299, ...
(remaining columns: null)

id: mesolitica/translated-router-switch-instruct | author: mesolitica | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T15:17:36Z | lastModified: 2023-10-26T15:17:36Z | createdAt: 2023-10-26T15:17:11.000Z | created: 2023-10-26T15:17:11
card:
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
(remaining columns: null)

id: JWBickel/KJVWordCounts | author: JWBickel | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "size_categories:10K<n<100K", "language:en", "region:us" ]
last_modified: 2023-10-26T15:34:15Z | lastModified: 2023-10-26T15:34:15Z | createdAt: 2023-10-26T15:19:53.000Z | created: 2023-10-26T15:19:53
card:
--- language: - en pretty_name: KJV Word Counts size_categories: - 10K<n<100K --- # Dataset Card for KJVWordCounts This is a simple list of every word and its total count in the King James version of the Bible. ## Dataset Structure It's a CSV file, delimited with a caret. Word ^ Count ## Dataset Card Author Jeremy Bickel ## Dataset Card Contact JeremyWBickel@gmail.com
[ -0.11573289334774017, -0.13097035884857178, -0.04982516169548035, 0.1221940740942955, -0.6289433240890503, 0.3182002305984497, -0.19015178084373474, 0.06389680504798889, 0.5262472033500671, 0.8482682108879089, -0.48863574862480164, -0.9692065119743347, -0.6973173022270203, 0.03219321742653...
(remaining columns: null)

id: Maxlinn/dalle3-paper-images | author: Maxlinn | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T15:24:55Z | lastModified: 2023-10-26T15:24:55Z | createdAt: 2023-10-26T15:22:53.000Z | created: 2023-10-26T15:22:53
card:
# DALLE3 Paper Images Example images shown in the DALL-E 3 paper, extracted without any compression: https://cdn.openai.com/papers/dall-e-3.pdf Images were extracted from the paper using https://pdfcandy.com/extract-images.html. Three images whose captions may not be correct are not included.
[ -0.39703184366226196, -0.41480037569999695, 0.7553953528404236, 0.4390562176704407, -0.4129300117492676, -0.3558616638183594, 0.3772810101509094, -0.5704945921897888, 0.3041515052318573, 0.5972844958305359, -0.24006736278533936, -0.47105371952056885, -0.47602546215057373, 0.262494802474975...
(remaining columns: null)

id: ParisNeo/lollms_aware_dataset | author: ParisNeo | downloads: 0 | likes: 1 | paperswithcode_id: null
tags: [ "task_categories:conversational", "language:en", "license:apache-2.0", "LoLLMs", "QnA", "region:us" ]
last_modified: 2023-10-27T20:43:37Z | lastModified: 2023-10-27T20:43:37Z | createdAt: 2023-10-26T15:25:07.000Z | created: 2023-10-26T15:25:07
card:
--- license: apache-2.0 task_categories: - conversational language: - en tags: - LoLLMs - QnA --- # LoLLMs-QNA Dataset ## Dataset Description The LoLLMs-QNA dataset was created by ParisNeo. The dataset is based on the documentation and knowledge base developed for LoLLMs. It aims to provide a comprehensive collection of questions and corresponding answers related to LoLLMs and its functionalities. The dataset is structured as a JSON file, with each entry consisting of a question and its corresponding answer. The questions cover various aspects of LoLLMs, including installation, features, functionalities, system requirements, and comparisons with other similar tools. The answers provide detailed information and instructions to assist users in understanding and utilizing LoLLMs effectively. It is important to note that the dataset also contains some generic thoughts and reflections about AI and its potential uses and threats. These thoughts represent ParisNeo's personal views and should not be considered as a universally accepted truth. ## Dataset Creation Process The LoLLMs-QNA dataset was created using a two-step process outlined in ParisNeo's white paper titled "From Text to Interactive Knowledge: Building Chat-Style Databases for AI Training." The process involves extracting questions from raw text and then utilizing a vectorized version of the raw data along with an LLM to generate answers. The raw text used for question extraction includes the documentation and knowledge base developed for LoLLMs, along with ParisNeo's personal insights and expertise in the field of AI. The questions were then manually crafted from this raw text to cover a wide range of topics related to LoLLMs. To generate the answers, a vectorized version of the raw data was created, along with an LLM model trained on the specific domain of LoLLMs. The LLM model was then used to generate accurate and informative answers to the extracted questions. ## Used Models This database was built using Database Maker on LoLLMs. Database Maker implements the algorithm presented in the white paper [From Text to Interactive Knowledge: Building Chat-Style Databases for AI Training](https://huggingface.co/datasets/ParisNeo/lollms_aware_dataset/resolve/main/lollms_db_building_strategy.pdf) To do the LLM tasks required to generate the questions and answers, I used the [airoboros-l2-70b-2.2.1](https://huggingface.co/jondurbin/airoboros-l2-70b-2.2.1) model by [jondurbin](https://huggingface.co/jondurbin). Updates to this database will come as LoLLMs documentation evolve and new functionalities are added constantly. So stay tuned. ## Dataset Format The LoLLMs-QNA dataset is provided as a JSON file. Each entry in the dataset consists of a dictionary with two key-value pairs: - "question": The question posed by the user. - "answer": The corresponding answer to the question. Example entry: ``` { "question": "What are the features of Lollms-webui?", "answer": "The features of Lollms-webui include:..." } ``` ## Usage and Disclaimer The LoLLMs-QNA dataset is intended to be used for various tasks, including training AI models, developing chatbots, and assisting users in understanding and utilizing LoLLMs. However, it is important to note that the dataset reflects ParisNeo's personal vision and perspectives about AI and LoLLMs. The answers provided in the dataset should not be considered as universally accepted truths, but rather as ParisNeo's personal insights and instructions. 
It is recommended to use the dataset in conjunction with other sources of information and to verify the accuracy and relevance of the answers provided. Users should exercise critical thinking and consider the specific context and requirements of their own applications and use cases. ## Acknowledgments ParisNeo would like to express gratitude to the open-source community and contributors who have supported the development and improvement of LoLLMs. The dataset is provided as a contribution back to the community and aims to facilitate the understanding and utilization of LoLLMs. ## Special thanks Special Thanks to [jondurbin](https://huggingface.co/jondurbin) for his advices and for providing the LLM that was used to build this dataset. Also special thanks to [Tom Jobbins](https://huggingface.co/TheBloke) for quantizing the model that was used to build this database. ## Licence Apache 2.0.
[ -0.40610742568969727, -0.9484795928001404, 0.32503917813301086, 0.015689896419644356, -0.08390024304389954, -0.17402663826942444, 0.008540814742445946, -0.47257092595100403, 0.2971089780330658, 0.7725067138671875, -0.5316834449768066, -0.6689971685409546, -0.20787695050239563, 0.0137354210...
(remaining columns: null)

id: anlp/anno_augmented | author: anlp | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T17:33:41Z | lastModified: 2023-10-26T17:33:41Z | createdAt: 2023-10-26T15:31:37.000Z | created: 2023-10-26T15:31:37
card:
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: sentences sequence: string - name: ner_tags sequence: string splits: - name: train num_bytes: 1227934 num_examples: 247 download_size: 0 dataset_size: 1227934 --- # Dataset Card for "anno_augmented" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.7647762894630432, -0.5203880071640015, 0.06730709969997406, 0.2770622670650482, -0.25770944356918335, -0.026334227994084358, 0.27522996068000793, -0.6345118284225464, 1.0069622993469238, 0.5868708491325378, -0.6481190919876099, -0.6216540336608887, -0.5811387300491333, 0.000765057862736...
(remaining columns: null)

id: Kishore05/Kan | author: Kishore05 | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T15:57:28Z | lastModified: 2023-10-26T15:57:28Z | createdAt: 2023-10-26T15:57:25.000Z | created: 2023-10-26T15:57:25
card:
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* dataset_info: features: - name: review dtype: string - name: review_length dtype: int64 splits: - name: train num_bytes: 19721.78947368421 num_examples: 17 - name: validation num_bytes: 2320.2105263157896 num_examples: 2 download_size: 25309 dataset_size: 22042.0 --- # Dataset Card for "Kan" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.4221488833427429, -0.3241894841194153, 0.31893211603164673, 0.24686606228351593, -0.307869553565979, 0.16511520743370056, 0.24762767553329468, -0.3462396562099457, 0.9761353731155396, 0.5146880745887756, -0.9090808629989624, -0.9568521976470947, -0.5610563158988953, -0.19137316942214966...
(remaining columns: null)

id: laurievb/open-lid-dataset | author: laurievb | downloads: 0 | likes: 1 | paperswithcode_id: null
tags: [ "task_categories:text-classification", "size_categories:100M<n<1B", "license:other", "region:us" ]
last_modified: 2023-11-10T10:12:56Z | lastModified: 2023-11-10T10:12:56Z | createdAt: 2023-10-26T16:00:52.000Z | created: 2023-10-26T16:00:52
card:
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: text dtype: string - name: language dtype: class_label: names: '0': plt_Latn '1': sun_Latn '2': ukr_Cyrl '3': spa_Latn '4': por_Latn '5': mya_Mymr '6': mkd_Cyrl '7': war_Latn '8': nso_Latn '9': wol_Latn '10': kam_Latn '11': mal_Mlym '12': gle_Latn '13': ayr_Latn '14': rus_Cyrl '15': pbt_Arab '16': pag_Latn '17': twi_Latn '18': als_Latn '19': lit_Latn '20': amh_Ethi '21': tur_Latn '22': tel_Telu '23': vec_Latn '24': zsm_Latn '25': ckb_Arab '26': tgk_Cyrl '27': tha_Thai '28': hye_Armn '29': deu_Latn '30': tat_Cyrl '31': swh_Latn '32': kac_Latn '33': tuk_Latn '34': lvs_Latn '35': tso_Latn '36': fao_Latn '37': tpi_Latn '38': umb_Latn '39': mlt_Latn '40': cym_Latn '41': ben_Beng '42': hat_Latn '43': ron_Latn '44': tir_Ethi '45': ewe_Latn '46': ind_Latn '47': snd_Arab '48': nld_Latn '49': urd_Arab '50': vie_Latn '51': mar_Deva '52': fra_Latn '53': lug_Latn '54': pol_Latn '55': ban_Latn '56': est_Latn '57': srp_Cyrl '58': kin_Latn '59': nno_Latn '60': fur_Latn '61': kmr_Latn '62': bho_Deva '63': fin_Latn '64': mri_Latn '65': ilo_Latn '66': fij_Latn '67': slk_Latn '68': knc_Arab '69': guj_Gujr '70': kor_Hang '71': tum_Latn '72': kab_Latn '73': afr_Latn '74': eng_Latn '75': acq_Arab '76': som_Latn '77': tgl_Latn '78': epo_Latn '79': bjn_Arab '80': mni_Beng '81': sot_Latn '82': nob_Latn '83': kat_Geor '84': ory_Orya '85': arb_Arab '86': heb_Hebr '87': ibo_Latn '88': asm_Beng '89': uzn_Latn '90': sna_Latn '91': mos_Latn '92': fuv_Latn '93': hne_Deva '94': apc_Arab '95': hun_Latn '96': ita_Latn '97': bem_Latn '98': slv_Latn '99': ssw_Latn '100': szl_Latn '101': nya_Latn '102': kir_Cyrl '103': hrv_Latn '104': pap_Latn '105': kik_Latn '106': knc_Latn '107': lmo_Latn '108': hau_Latn '109': eus_Latn '110': ltz_Latn '111': grn_Latn '112': lus_Latn '113': taq_Latn '114': scn_Latn '115': kmb_Latn '116': azj_Latn '117': isl_Latn '118': swe_Latn '119': uig_Arab '120': jpn_Jpan '121': sag_Latn '122': xho_Latn '123': ast_Latn '124': kan_Knda '125': sin_Sinh '126': acm_Arab '127': tzm_Tfng '128': dan_Latn '129': zho_Hant '130': zho_Hans '131': pes_Arab '132': fon_Latn '133': tam_Taml '134': yor_Latn '135': run_Latn '136': arz_Arab '137': awa_Deva '138': pan_Guru '139': gaz_Latn '140': lao_Laoo '141': bos_Latn '142': ces_Latn '143': bam_Latn '144': crh_Latn '145': ltg_Latn '146': bul_Cyrl '147': gla_Latn '148': ell_Grek '149': prs_Arab '150': smo_Latn '151': ajp_Arab '152': tsn_Latn '153': bak_Cyrl '154': srd_Latn '155': ace_Arab '156': kas_Arab '157': lua_Latn '158': taq_Tfng '159': jav_Latn '160': cat_Latn '161': kon_Latn '162': hin_Deva '163': lin_Latn '164': khk_Cyrl '165': cjk_Latn '166': mag_Deva '167': dik_Latn '168': bug_Latn '169': bjn_Latn '170': yue_Hant '171': zul_Latn '172': npi_Deva '173': kas_Deva '174': dzo_Tibt '175': ary_Arab '176': bel_Cyrl '177': kbp_Latn '178': khm_Khmr '179': ace_Latn '180': nus_Latn '181': ceb_Latn '182': mai_Deva '183': san_Deva '184': dyu_Latn '185': quy_Latn '186': lim_Latn '187': min_Latn '188': oci_Latn '189': kaz_Cyrl '190': luo_Latn '191': sat_Olck '192': ydd_Hebr '193': shn_Mymr '194': ars_Arab '195': lij_Latn '196': aeb_Arab '197': bod_Tibt '198': glg_Latn '199': kea_Latn '200': azb_Arab - name: dataset_source dtype: string splits: - name: train num_bytes: 21749592609 num_examples: 118296182 download_size: 16568412828 dataset_size: 21749592609 license: other task_categories: - text-classification size_categories: - 100M<n<1B --- # Dataset Card for 
"open-lid-dataset" ## Dataset Description - **Repository:** [https://github.com/laurieburchell/open-lid-dataset]() - **Paper:** [An Open Dataset and Model for Language Identification](https://aclanthology.org/2023.acl-short.75/) - **Point of Contact:** laurie.burchell AT ed.ac.uk ### Dataset Summary The OpenLID dataset covers 201 languages and is designed for training language identification models. The majority of the source datasets were derived from news sites, Wikipedia, or religious text, though some come from other domains (e.g. transcribed conversations, literature, or social media). A sample of each language in each source was manually audited to check it was in the attested language (see [the paper](https://aclanthology.org/2023.acl-short.75/)) for full details. ### Supported tasks This dataset is intended for training high-coverage language identification models (e.g. [OpenLID](https://huggingface.co/laurievb/OpenLID)). It is compatible with the [FLORES-200](https://github.com/facebookresearch/flores/tree/main/flores200) evaluation benchmark. ### Languages There are 201 languages included in the dataset with varying amounts of data: the largest class (English) contains 7.5 million lines of data, and the smallest (South Azerbaijani) contains 532 lines of data. The mean number of lines per language is 602,812. A full breakdown of lines of data per language is available [on the repo](https://github.com/laurieburchell/open-lid-dataset/blob/main/languages.md). ## Dataset Structure ### Data Instances Each entry in the dataset consists of a line of data, a language label included script information, and a tag indicating the source. ```json { "text": "¿Serás exaltada hasta el cielo?", "language": "spa_Latn", "dataset_source": "lti" } ``` ### Data Splits Only a train split is provided. The dataset is designed to be compatible with the [FLORES-200](https://github.com/facebookresearch/flores/tree/main/flores200) evaluation benchmark. ## Dataset Creation ### Curation Rationale Recent work has found that existing language identification algorithms perform poorly in practice compared to test performance. The problem is particularly acute for low-resource languages: [Kreutzer et al. (2022)](https://direct.mit.edu/tacl/article/doi/10.1162/tacl_a_00447/109285/Quality-at-a-Glance-An-Audit-of-Web-Crawled) found a positive Spearman rank correlation between quality of data and size of language for all of the \ac{lid}-filtered multilingual datasets they studied. In addition, for a significant fraction of the language corpora they studied, less than half of the sentences were in the correct language. They point out that such low-quality data not only leads to poor performance in downstream tasks, but that it also contributes to `representation washing', where the community is given a false view of the actual progress of low-resource natural language processing. There are several open language identification models offering quick classification and high language coverage (e.g. CLD3, No Language Left Behind). However, to the best of our knowledge, none of the commonly-used scalable language identificaiton systems make their training data public. This dataset aims to address that gap by curating and combining sources of open training data for language identification and by auditing a sample of all languages in each source to check reliability. ### Source Data The majority of the source datasets were derived from news sites, Wikipedia, or religious text, though some come from other domains (e.g. 
transcribed conversations, literature, or social media). We provide a full list at the end of this model card along with the licensing information for each source. #### Initial Data Collection and Normalisation Our initial aim was to cover the same languages present in the FLORES-200 Evaluation Benchmark so that we could use this dataset for evaluation. However, during the curation process, we decided to exclude three languages. Firstly, though Akan and Twi are both included as separate languages in FLORES-200, Akan is actually a macrolanguage covering a language continuum which includes Twi. Given the other languages in FLORES-200 are individual languages, we decided to exclude Akan. Secondly, FLORES-200 includes Modern Standard Arabic (MSA) written in Latin script. It is true that Arabic dialects are often written in Latin characters in informal situations (e.g. social media). However, MSA is a form of standardised Arabic which is not usually used in informal situations. Since we could not any find naturally-occurring training data, we excluded MSA from the dataset. Finally, we excluded Minangkabau in Arabic script because it is now rarely written this way, making it difficult to find useful training data. The first step in our manual audit was to check and standardise language labels, as these are often inconsistent or idiosyncratic. We chose to copy the language codes in FLORES-200 and reassign macrolanguage or ambiguous language codes in the data sources we found to the dominant individual language. Whilst this resulted in more useful data for some languages, for other languages we had to be more conservative. For example, we originally reassigned text labelled as the macrolanguage Malay (msa_Latn) to Standard Malay, but this led to a large drop in performance as the former covers a very diverse set of languages. Two of the authors then carried out a manual audit of a random sample of all data sources and languages: one a native Bulgarian speaker (able to read Cyrillic and Latin scripts and Chinese characters), and the other a native English speaker (able to read Latin, Arabic and Hebrew scripts). For languages we knew, we checked the language was what we expected. For unfamiliar languages in a script we could read, we compared the sample to the Universal Declaration of Human Rights or failing that, to a sample of text on Wikipedia. We compared features of the text which are common in previous language identification algorithms and could be identified easily by humans: similar diacritics, word lengths, common words, loan words matching the right cultural background, similar suffixes and prefixes, and vowel/consonant patterns. For scripts we could not read, we checked that all lines of the sample matched the script in the Universal Declaration of Human Rights. We kept preprocessing minimal so that the process was as language agnostic as possible. We used the scripts provided with Moses to remove non-printing characters and detokenise the data where necessary. We then filtered the data so that each line contained at least one character in the expected script (as defined by Perl) to allow for borrowings. Finally, we sampled proportionally to $ p_l^{0.3} $, where $ p_l $ is the fraction of lines in the dataset which are in language $ l $. This aims to ameliorate class skew issues. ## Considerations for Using the Data ### Social Impact of Dataset This dataset covers a number of low-resourced languages. 
This makes it a potentially useful resource, but due to the limited amount of data and domains, care must be taken not to overclaim performance or coverage. ### Discussion of Biases Our work aims to broaden natural language processing coverage by allowing practitioners to identify relevant data in more languages. However, we note that language identification is inherently a normative activity that risks excluding minority dialects, scripts, or entire microlanguages from a macrolanguage. Choosing which languages to cover may reinforce power imbalances, as only some groups gain access to language processing technologies. In addition, errors in language identification can have a significant impact on downstream performance, particularly (as is often the case) when a system is used as a `black box'. The performance of our classifier is not equal across languages which could lead to worse downstream performance for particular groups. We mitigate this by providing metrics by class. ## Additional information The dataset was curated from the sources listed below by Laurie Burchell and Nikolay Bogoychev. ### Licensing Information License considerations for each source are given below. Open use for non-commercial purposes is covered by all licences. If you view any part of this dataset as a violation of intellectual property rights, please let us know and we will remove it. | Source | Description | License | |---|---|---| |[Arabic Dialects Dataset](https://www.lancaster.ac.uk/staff/elhaj/corpora.html)| Dataset of Arabic dialects for Gulf, Egyptian, Levantine, and Tunisian Arabic dialects plus MSA|No explicit license; website describes data as "some free and useful Arabic corpora that I have created for researchers working on Arabic Natural Language Processing, Corpus and Computational Linguistics."| |[BLTR](https://github.com/shashwatup9k/bho-resources)|Monolingual Bhojpuri corpus|[CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/)| |[Global Voices](https://opus.nlpl.eu/GlobalVoices-v2015.php)|A parallel corpus of news stories from the web site Global Voices|The website for [Global Voices](https://globalvoices.org/) is licensed as [Creative Commons Attribution 3.0](https://creativecommons.org/licenses/by/3.0/). There is no explicit additional license accompanying the dataset.| |[Guaraní Parallel Set](https://github.com/sgongora27/giossa-gongora-guarani-2021)|Parallel Guaraní-Spanish news corpus sourced from Paraguyan websites|No explicit license| |[HKCanCor](https://github.com/fcbond/hkcancor)|Transcribed conversations in Hong Kong Cantonese|[CC BY 4.0](https://creativecommons.org/licenses/by/4.0/legalcode)| |[IADD](https://github.com/JihadZa/IADD)|Arabic dialect identification dataset covering 5 regions (Maghrebi, Levantine, Egypt, Iraq, and Gulf) and 9 countries (Algeria, Morocco, Tunisia, Palestine, Jordan, Syria, Lebanon, Egypt and Iraq). It is created from five corpora: [DART](http://qufaculty.qu.edu.qa/telsay), [SHAMI](https://github.com/GU-CLASP/shami-corpus), [TSAC](https://github.com/fbougares/TSAC), [PADIC](https://sourceforge.net/projects/padic/), and [AOC](https://www.cs.jhu.edu/data-archive/AOC-2010/). | Multiple licenses: [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) (SHAMI); [GNU Lesser General Public License v3.0](https://github.com/fbougares/TSAC/blob/master/LICENSE) (TSAC); [GNU General Public License v3](https://www.gnu.org/licenses/gpl-3.0.en.html) (PADIC). 
DART and AOC had no explicit license.| |[Leipzig Corpora Collection](https://wortschatz.uni-leipzig.de/en/download)|A collection of corpora in different languages with an identical format.|The [Terms of Usage](https://wortschatz.uni-leipzig.de/en/usage) states "Permission for use is granted free of charge solely for non-commercial personal and scientific purposes licensed under the [Creative Commons License CC BY-NC](https://creativecommons.org/licenses/by-nc/4.0/)."| |[LTI](https://www.cs.cmu.edu/~ralf/langid.html)|Training data for language identification|From the README: "With the exception of the contents of the Europarl/, ProjectGutenberg/, and PublicDomain/ directories, all code and text in this corpus are copyrighted. However, they may be redistributed under the terms of various Creative Commons licenses and the GNU GPL. Copying the unmodified archive noncommercially is permitted by all of the licenses. For commercial redistribution or redistribution of modified versions, please consult the individual licenses."| |[MADAR Shared Task 2019, subtask 1](https://camel.abudhabi.nyu.edu/madar-shared-task-2019/)|Dialectal Arabic in the travel domain|The MADAR Corpus has a custom license, the text of which can be found in this repo.| |[EM corpus](http://lepage-lab.ips.waseda.ac.jp/en/projects/meiteilon-manipuri-language-resources/)|Parallel Manipuri-English sentences crawled from [The Sangai Express](https://www.thesangaiexpress.com/)|[CC BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/)| |[MIZAN](https://github.com/omidkashefi/Mizan)|Parallel Persian-English corpus from literature domain|[CC BY 4.0](https://creativecommons.org/licenses/by/4.0/)| |[MT560 v1](https://opus.nlpl.eu/MT560.php)|A machine translation dataset for over 500 languages to English. We have filtered out data from OPUS-100, Europarl, Open Subtitles, Paracrawl, Wikimedia, Wikimatrix, Wikititles, and Common Crawl due to issues with the fidelity of the language labels. 
|[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0)| |[NLLB Seed](https://github.com/facebookresearch/flores/blob/main/nllb_seed/README.md)|Around 6000 sentences in 39 languages sampled from Wikipedia, intended to cover languages lacking training data.|[CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/)| |[SETIMES](https://opus.nlpl.eu/SETIMES.php)|A parallel corpus of news articles in the Balkan languages|[CC-BY-SA 3.0](https://creativecommons.org/licenses/by-sa/3.0/)| |[Tatoeba](https://opus.nlpl.eu/Tatoeba.php)|Collaborative sentence translations|[CC BY 2.0 FR](https://creativecommons.org/licenses/by/2.0/fr/)| |[Tehran English-Persian parallel corpus (TEP)](https://opus.nlpl.eu/TEP.php)|Parallel Persian-English sentences sourced from subtitles|[GNU General Public License](https://www.gnu.org/licenses/gpl-3.0.html)| |[Turkic Interlingua (TIL) Corpus](https://github.com/turkic-interlingua/til-mt)|A large-scale parallel corpus combining most of the public datasets for 22 Turkic languages|[CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/)| |[WiLI-2018](https://zenodo.org/record/841984)|Wikipedia language identification benchmark containing 235K paragraphs of 235 languages|[Open Data Commons Open Database License (ODbL) v1.0](https://opendatacommons.org/licenses/odbl/1-0/)| |[XL-Sum](https://github.com/csebuetnlp/xl-sum)|Summarisation dataset covering 44 languages, sourced from BBC News|[CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/)| ### Citation Information If you use this dataset, please cite all the authors [in the citation file](https://github.com/laurieburchell/open-lid-dataset/blob/main/citations.bib) who compiled the source datasets, plus the OpenLID paper: ```bibtex @inproceedings{burchell-etal-2023-open, title = "An Open Dataset and Model for Language Identification", author = "Burchell, Laurie and Birch, Alexandra and Bogoychev, Nikolay and Heafield, Kenneth", editor = "Rogers, Anna and Boyd-Graber, Jordan and Okazaki, Naoaki", booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)", month = jul, year = "2023", address = "Toronto, Canada", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.acl-short.75", doi = "10.18653/v1/2023.acl-short.75", pages = "865--879", abstract = "Language identification (LID) is a fundamental step in many natural language processing pipelines. However, current LID systems are far from perfect, particularly on lower-resource languages. We present a LID model which achieves a macro-average F1 score of 0.93 and a false positive rate of 0.033{\%} across 201 languages, outperforming previous work. We achieve this by training on a curated dataset of monolingual data, which we audit manually to ensure reliability. We make both the model and the dataset available to the research community. Finally, we carry out detailed analysis into our model{'}s performance, both in comparison to existing open models and by language class.", } ``` ### Contributions Thanks to @hac541309 and @davanstrien for adding this dataset.
[ -0.3315199017524719, -0.453360378742218, 0.006288476753979921, 0.21695038676261902, -0.15304060280323029, 0.2079106867313385, -0.6624844670295715, -0.5858095288276672, 0.05427885800600052, 0.38298019766807556, -0.16508466005325317, -0.6321869492530823, -0.45339617133140564, 0.3539490103721...
(remaining columns: null)

id: royson/train_splits_helm | author: royson | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "license:apache-2.0", "region:us" ]
last_modified: 2023-10-26T16:27:29Z | lastModified: 2023-10-26T16:27:29Z | createdAt: 2023-10-26T16:17:46.000Z | created: 2023-10-26T16:17:46
card:
--- license: apache-2.0 --- Contains the train splits of the following datasets from [helm](https://github.com/stanford-crfm/helm): - big bench - mmlu - TruthfulQA - cnn/dm - gsm - bbq - boolq - NarrativeQA - QuAC - math - bAbI Each prompt has <= 5 in-context samples along with a sample, all of which are from the train set of the respective datasets.
[ -0.8031102418899536, -0.6632604598999023, 0.44213417172431946, 0.39050254225730896, -0.10077542811632156, 0.1371222287416458, -0.08011294156312943, -0.1842811107635498, 0.0333884060382843, 0.556081235408783, -1.1206800937652588, -0.7350300550460815, -0.3040953278541565, 0.03106873668730259...
(remaining columns: null)

id: sabman/llmrouter | author: sabman | downloads: 0 | likes: 1 | paperswithcode_id: null
tags: [ "license:cc-by-4.0", "region:us" ]
last_modified: 2023-11-08T19:32:26Z | lastModified: 2023-11-08T19:32:26Z | createdAt: 2023-10-26T16:44:40.000Z | created: 2023-10-26T16:44:40
card:
--- license: cc-by-4.0 ---
[ -0.1285335123538971, -0.1861683875322342, 0.6529128551483154, 0.49436232447624207, -0.19319400191307068, 0.23607441782951355, 0.36072009801864624, 0.05056373029947281, 0.5793656706809998, 0.7400146722793579, -0.650810182094574, -0.23784008622169495, -0.7102247476577759, -0.0478255338966846...
(remaining columns: null)

id: vishnusr/code_searchnet_reduced | author: vishnusr | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T16:57:30Z | lastModified: 2023-10-26T16:57:30Z | createdAt: 2023-10-26T16:57:00.000Z | created: 2023-10-26T16:57:00
card:
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: 'Unnamed: 0.1' dtype: int64 - name: 'Unnamed: 0' dtype: int64 - name: code dtype: string - name: docstring dtype: string - name: prompt dtype: string splits: - name: train num_bytes: 992068 num_examples: 500 download_size: 440777 dataset_size: 992068 --- # Dataset Card for "code_searchnet_reduced" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.7289102673530579, -0.01282355934381485, -0.018157633021473885, -0.03271016851067543, 0.03150613233447075, -0.07949017733335495, -0.023157423362135887, 0.1537458598613739, 0.9401636123657227, 0.5708654522895813, -0.9116181135177612, -0.7746967673301697, -0.3006438612937927, -0.1846975982...
(remaining columns: null)

id: CJWeiss/lcr | author: CJWeiss | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T17:10:08Z | lastModified: 2023-10-26T17:10:08Z | createdAt: 2023-10-26T17:09:59.000Z | created: 2023-10-26T17:09:59
card:
--- dataset_info: features: - name: Long Text dtype: string - name: Summary dtype: string splits: - name: train num_bytes: 82108819 num_examples: 2918 - name: test num_bytes: 18916443 num_examples: 584 - name: valid num_bytes: 12955974 num_examples: 389 download_size: 56044522 dataset_size: 113981236 --- # Dataset Card for "lcr" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.6148062348365784, -0.16936545073986053, 0.1414634883403778, 0.016078589484095573, -0.18451517820358276, 0.17325522005558014, 0.22590643167495728, -0.18179760873317719, 0.5529050230979919, 0.6247765421867371, -0.8341385126113892, -0.6901540160179138, -0.4748435318470001, -0.2525217235088...
(remaining columns: null)

id: Aliquip/sd-prompttastic | author: Aliquip | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "license:cc0-1.0", "region:us" ]
last_modified: 2023-11-02T19:06:03Z | lastModified: 2023-11-02T19:06:03Z | createdAt: 2023-10-26T17:19:38.000Z | created: 2023-10-26T17:19:38
card:
--- license: cc0-1.0 ---
[ -0.12853392958641052, -0.18616779148578644, 0.6529127955436707, 0.49436280131340027, -0.19319361448287964, 0.23607419431209564, 0.36072003841400146, 0.050563063472509384, 0.579365611076355, 0.7400140762329102, -0.6508104205131531, -0.23783954977989197, -0.7102249264717102, -0.0478260256350...
(remaining columns: null)

id: Kishore05/kan100 | author: Kishore05 | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T17:40:48Z | lastModified: 2023-10-26T17:40:48Z | createdAt: 2023-10-26T17:31:58.000Z | created: 2023-10-26T17:31:58
card:
Entry not found
[ -0.3227647542953491, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965083122253, 0.7915717959403992, 0.07618629932403564, 0.7746022343635559, 0.2563222348690033, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
(remaining columns: null)

id: Th3ro/nRadioWaveDataset | author: Th3ro | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "task_categories:image-classification", "size_categories:n<1K", "language:en", "license:wtfpl", "region:us" ]
last_modified: 2023-10-26T18:15:07Z | lastModified: 2023-10-26T18:15:07Z | createdAt: 2023-10-26T18:01:47.000Z | created: 2023-10-26T18:01:47
card:
--- license: wtfpl task_categories: - image-classification language: - en size_categories: - n<1K ---
[ -0.12853392958641052, -0.18616779148578644, 0.6529127955436707, 0.49436280131340027, -0.19319361448287964, 0.23607419431209564, 0.36072003841400146, 0.050563063472509384, 0.579365611076355, 0.7400140762329102, -0.6508104205131531, -0.23783954977989197, -0.7102249264717102, -0.0478260256350...
(remaining columns: null)

id: SummerSigh/AncientMNIST | author: SummerSigh | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T18:06:58Z | lastModified: 2023-10-26T18:06:58Z | createdAt: 2023-10-26T18:03:37.000Z | created: 2023-10-26T18:03:37
card:
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: image dtype: image - name: label dtype: class_label: names: '0': Alpha '1': Beta '2': Chi '3': Delta '4': Epsilon '5': Eta '6': Gamma '7': Iota '8': Kappa '9': Lambda '10': LunateSigma '11': Mu '12': Nu '13': Omega '14': Omicron '15': Phi '16': Pi '17': Psi '18': Rho '19': Tau '20': Theta '21': Upsilon '22': Xi '23': Zeta splits: - name: train num_bytes: 309609553.26 num_examples: 205797 download_size: 217254607 dataset_size: 309609553.26 --- # Dataset Card for "AncientMNIST" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.6496226787567139, -0.32982972264289856, 0.17440150678157806, -0.22829030454158783, -0.32663968205451965, -0.1735917031764984, 0.15801629424095154, -0.2742129862308502, 0.836234986782074, 0.6002943515777588, -0.5577822327613831, -0.758497416973114, -0.5951105952262878, -0.343935906887054...
(remaining columns: null)

id: vira-chatbot/vira-intents-mod | author: vira-chatbot | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-31T03:54:15Z | lastModified: 2023-10-31T03:54:15Z | createdAt: 2023-10-26T18:04:54.000Z | created: 2023-10-26T18:04:54
card:
--- dataset_info: features: - name: text dtype: string - name: label dtype: int64 splits: - name: train num_bytes: 509234 num_examples: 7047 - name: validation num_bytes: 213834 num_examples: 2971 download_size: 329146 dataset_size: 723068 --- # Dataset Card for "vira-intents-mod" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.3495950996875763, -0.4513622224330902, 0.3307555317878723, 0.18790480494499207, -0.3063890337944031, -0.43440282344818115, 0.15468676388263702, -0.054047826677560806, 0.8852218389511108, 0.5910779237747192, -0.9854716658592224, -0.7077866196632385, -0.40091270208358765, -0.2748416066169...
(remaining columns: null)

id: varshil27/1mg-train-data-LLama2-formatted | author: varshil27 | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "license:mit", "region:us" ]
last_modified: 2023-10-26T18:30:30Z | lastModified: 2023-10-26T18:30:30Z | createdAt: 2023-10-26T18:29:24.000Z | created: 2023-10-26T18:29:24
card:
--- license: mit ---
[ -0.12853392958641052, -0.18616779148578644, 0.6529127955436707, 0.49436280131340027, -0.19319361448287964, 0.23607419431209564, 0.36072003841400146, 0.050563063472509384, 0.579365611076355, 0.7400140762329102, -0.6508104205131531, -0.23783954977989197, -0.7102249264717102, -0.0478260256350...
(remaining columns: null)

id: BossBossNJb/cifar100_dataset_th_en | author: BossBossNJb | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "license:apache-2.0", "region:us" ]
last_modified: 2023-10-28T09:14:29Z | lastModified: 2023-10-28T09:14:29Z | createdAt: 2023-10-26T18:35:54.000Z | created: 2023-10-26T18:35:54
card:
--- license: apache-2.0 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* dataset_info: features: - name: img dtype: image - name: fine_label dtype: class_label: names: '0': apple '1': aquarium_fish '2': baby '3': bear '4': beaver '5': bed '6': bee '7': beetle '8': bicycle '9': bottle '10': bowl '11': boy '12': bridge '13': bus '14': butterfly '15': camel '16': can '17': castle '18': caterpillar '19': cattle '20': chair '21': chimpanzee '22': clock '23': cloud '24': cockroach '25': couch '26': cra '27': crocodile '28': cup '29': dinosaur '30': dolphin '31': elephant '32': flatfish '33': forest '34': fox '35': girl '36': hamster '37': house '38': kangaroo '39': keyboard '40': lamp '41': lawn_mower '42': leopard '43': lion '44': lizard '45': lobster '46': man '47': maple_tree '48': motorcycle '49': mountain '50': mouse '51': mushroom '52': oak_tree '53': orange '54': orchid '55': otter '56': palm_tree '57': pear '58': pickup_truck '59': pine_tree '60': plain '61': plate '62': poppy '63': porcupine '64': possum '65': rabbit '66': raccoon '67': ray '68': road '69': rocket '70': rose '71': sea '72': seal '73': shark '74': shrew '75': skunk '76': skyscraper '77': snail '78': snake '79': spider '80': squirrel '81': streetcar '82': sunflower '83': sweet_pepper '84': table '85': tank '86': telephone '87': television '88': tiger '89': tractor '90': train '91': trout '92': tulip '93': turtle '94': wardrobe '95': whale '96': willow_tree '97': wolf '98': woman '99': worm - name: coarse_label dtype: class_label: names: '0': aquatic_mammals '1': fish '2': flowers '3': food_containers '4': fruit_and_vegetables '5': household_electrical_devices '6': household_furniture '7': insects '8': large_carnivores '9': large_man-made_outdoor_things '10': large_natural_outdoor_scenes '11': large_omnivores_and_herbivores '12': medium_mammals '13': non-insect_invertebrates '14': people '15': reptiles '16': small_mammals '17': trees '18': vehicles_1 '19': vehicles_2 - name: fine_label_en dtype: string - name: coarse_label_en dtype: string - name: fine_label_th dtype: string - name: coarse_label_th dtype: string splits: - name: train num_bytes: 118042106.0 num_examples: 50000 - name: test num_bytes: 23663661.0 num_examples: 10000 download_size: 144731114 dataset_size: 141705767.0 ---
[ -0.12853392958641052, -0.18616779148578644, 0.6529127955436707, 0.49436280131340027, -0.19319361448287964, 0.23607419431209564, 0.36072003841400146, 0.050563063472509384, 0.579365611076355, 0.7400140762329102, -0.6508104205131531, -0.23783954977989197, -0.7102249264717102, -0.0478260256350...
(remaining columns: null)

id: Kateway/Thursday | author: Kateway | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T18:42:34Z | lastModified: 2023-10-26T18:42:34Z | createdAt: 2023-10-26T18:36:22.000Z | created: 2023-10-26T18:36:22
card:
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
(remaining columns: null)

id: JasperHG90/neurips-efficiency-challenge-2023 | author: JasperHG90 | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "license:apache-2.0", "region:us" ]
last_modified: 2023-10-27T05:09:24Z | lastModified: 2023-10-27T05:09:24Z | createdAt: 2023-10-26T18:46:50.000Z | created: 2023-10-26T18:46:50
card:
--- license: apache-2.0 dataset_info: features: - name: instruction dtype: string - name: input dtype: string - name: output dtype: string - name: dataset dtype: string splits: - name: train num_bytes: 78107861 num_examples: 65209 download_size: 0 dataset_size: 78107861 configs: - config_name: default data_files: - split: train path: data/train-* ---
[ -0.12853392958641052, -0.18616779148578644, 0.6529127955436707, 0.49436280131340027, -0.19319361448287964, 0.23607419431209564, 0.36072003841400146, 0.050563063472509384, 0.579365611076355, 0.7400140762329102, -0.6508104205131531, -0.23783954977989197, -0.7102249264717102, -0.0478260256350...
(remaining columns: null)

id: fia24/filtered_lemma41kV0.0.1 | author: fia24 | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "region:us" ]
last_modified: 2023-10-26T18:59:05Z | lastModified: 2023-10-26T18:59:05Z | createdAt: 2023-10-26T18:58:59.000Z | created: 2023-10-26T18:58:59
card:
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* - split: val path: data/val-* dataset_info: features: - name: 'Unnamed: 0' dtype: int64 - name: Inflected_Word dtype: string - name: Lemma dtype: string splits: - name: train num_bytes: 1841860.2133993004 num_examples: 29267 - name: test num_bytes: 230271.85980209926 num_examples: 3659 - name: val num_bytes: 230208.92679860047 num_examples: 3658 download_size: 1233470 dataset_size: 2302341.0 --- # Dataset Card for "filtered_lemma41kV0.0.1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.9202554225921631, -0.09990386664867401, 0.2341238111257553, 0.14038057625293732, -0.6606507301330566, -0.231413334608078, 0.32356178760528564, -0.023887185379862785, 0.7325388193130493, 0.9771038293838501, -1.0233492851257324, -0.8491265773773193, -0.6807514429092407, -0.311274200677871...
(remaining columns: null)

id: leffff/south-park-character-png-dataset-old | author: leffff | downloads: 0 | likes: 0 | paperswithcode_id: null
tags: [ "license:mit", "region:us" ]
last_modified: 2023-10-26T19:12:34Z | lastModified: 2023-10-26T19:12:34Z | createdAt: 2023-10-26T19:09:39.000Z | created: 2023-10-26T19:09:39
card:
--- license: mit ---
[ -0.12853392958641052, -0.18616779148578644, 0.6529127955436707, 0.49436280131340027, -0.19319361448287964, 0.23607419431209564, 0.36072003841400146, 0.050563063472509384, 0.579365611076355, 0.7400140762329102, -0.6508104205131531, -0.23783954977989197, -0.7102249264717102, -0.0478260256350...
null
null
null
null
null
null
null
null
null
null
null
null
null
jessica-ecosia/gpdr-dpr-dataset
jessica-ecosia
2023-10-26T20:27:22Z
0
0
null
[ "region:us" ]
2023-10-26T20:27:22Z
2023-10-26T20:27:04.000Z
2023-10-26T20:27:04
--- dataset_info: features: - name: id dtype: string - name: text dtype: string - name: title dtype: string - name: embeddings sequence: sequence: float64 splits: - name: train num_bytes: 4191740 num_examples: 620 download_size: 0 dataset_size: 4191740 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "gpdr-dpr-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.5677480697631836, -0.14610232412815094, 0.1915549635887146, 0.18666552007198334, -0.27757027745246887, 0.2013770490884781, 0.34407636523246765, 0.08810213953256607, 0.5866239666938782, 0.24418717622756958, -0.7251662015914917, -0.7302720546722412, -0.7052668333053589, -0.206071600317955...
null
null
null
null
null
null
null
null
null
null
null
null
null
DuckyPolice/Mark-Rober-Voice
DuckyPolice
2023-10-27T20:14:24Z
0
1
null
[ "language:en", "license:wtfpl", "region:us" ]
2023-10-27T20:14:24Z
2023-10-26T20:40:52.000Z
2023-10-26T20:40:52
--- license: wtfpl language: - en pretty_name: Mark Rober Voice Dataset --- This is where I put Mark Rober voice clips.
[ -0.9611343741416931, -0.4431963860988617, 0.8098533749580383, -0.001167741953395307, 0.1518103927373886, 0.36929699778556824, 0.04518892988562584, 0.558419406414032, 0.4793206751346588, 0.9340238571166992, -0.5099937319755554, -0.12221500277519226, -0.5198929905891418, 0.05814408138394356,...
null
null
null
null
null
null
null
null
null
null
null
null
null
dilith/RPA
dilith
2023-10-26T20:48:34Z
0
0
null
[ "region:us" ]
2023-10-26T20:48:34Z
2023-10-26T20:46:27.000Z
2023-10-26T20:46:27
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
gg-ai/es-2610-no-demoji-m
gg-ai
2023-10-26T20:54:11Z
0
0
null
[ "region:us" ]
2023-10-26T20:54:11Z
2023-10-26T20:54:05.000Z
2023-10-26T20:54:05
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* - split: val path: data/val-* dataset_info: features: - name: text dtype: string - name: clean_text dtype: string - name: sent dtype: int64 splits: - name: train num_bytes: 14582372 num_examples: 37614 - name: test num_bytes: 2804158 num_examples: 7523 - name: val num_bytes: 728021 num_examples: 1881 download_size: 12052915 dataset_size: 18114551 --- # Dataset Card for "es-2610-no-demoji-m" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.5425274968147278, -0.12602877616882324, 0.21978700160980225, 0.0900925025343895, -0.35542452335357666, -0.19865451753139496, 0.05956972390413284, 0.09649823606014252, 1.1491470336914062, 0.6614580750465393, -1.2033846378326416, -0.8871276378631592, -0.5262726545333862, 0.071052126586437...
null
null
null
null
null
null
null
null
null
null
null
null
null
Cam1234K/caze
Cam1234K
2023-10-26T21:01:03Z
0
0
null
[ "region:us" ]
2023-10-26T21:01:03Z
2023-10-26T21:00:43.000Z
2023-10-26T21:00:43
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
MaxReynolds/TestUpload
MaxReynolds
2023-10-26T21:10:24Z
0
0
null
[ "region:us" ]
2023-10-26T21:10:24Z
2023-10-26T21:10:22.000Z
2023-10-26T21:10:22
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: image dtype: image - name: text dtype: string splits: - name: train num_bytes: 1216137.0 num_examples: 10 download_size: 1217696 dataset_size: 1216137.0 --- # Dataset Card for "TestUpload" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.5396401286125183, -0.15433679521083832, 0.2535497844219208, 0.22878798842430115, -0.06866474449634552, 0.06890417635440826, 0.17558379471302032, -0.03636077791452408, 0.5921592712402344, 0.4115438163280487, -0.7769063115119934, -0.5829201936721802, -0.522628664970398, -0.116016246378421...
null
null
null
null
null
null
null
null
null
null
null
null
null
faizalnf1800/scifi-book
faizalnf1800
2023-10-27T11:43:33Z
0
0
null
[ "region:us" ]
2023-10-27T11:43:33Z
2023-10-26T21:49:13.000Z
2023-10-26T21:49:13
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: filename dtype: string - name: text dtype: string splits: - name: train num_bytes: 3557355 num_examples: 7 download_size: 2057340 dataset_size: 3557355 --- # Dataset Card for "scifi-book" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.5605428814888, -0.05905961990356445, 0.17199988663196564, 0.0825732946395874, -0.13680295646190643, 0.04069529473781586, 0.32265955209732056, -0.21745647490024567, 0.7400928139686584, 0.4678611755371094, -0.9627367854118347, -0.6873226761817932, -0.43588754534721375, -0.1413209885358810...
null
null
null
null
null
null
null
null
null
null
null
null
null
shanchen/OncQA
shanchen
2023-10-30T14:24:19Z
0
1
null
[ "task_categories:conversational", "task_categories:text2text-generation", "language:en", "license:cc-by-sa-4.0", "medical", "arxiv:2310.17703", "region:us" ]
2023-10-30T14:24:19Z
2023-10-26T21:56:33.000Z
2023-10-26T21:56:33
--- license: cc-by-sa-4.0 task_categories: - conversational - text2text-generation language: - en tags: - medical --- ## OncQA: The Impact of Using an AI Chatbot to Respond to Patient Messages ### Importance Documentation burden is a major factor contributing to clinician burnout, which is increasing across the country and threatens our capacity to provide patient care in the U.S. While AI chatbots show potential in reducing this burden by aiding in documentation and are being incorporated into electronic health record systems, their influence on clinical decision-making remains understudied for this purpose. ### Objective Investigate the acceptability, safety, and potential human factors issues when utilizing an AI-powered chatbot to draft responses to patients' inquiries. ### Design - A 2-stage cross-sectional study was designed around 100 synthetic cancer patient scenarios coupled with patient messages. - These questions emulate realistic oncology scenarios. - **Stage 1: Manual Response**: Six oncologists were randomly allocated 26 questions for response. - **Stage 2: AI-Assisted Response**: The same oncologists received 26 new questions, alongside GPT-4 generated responses for editing. - Informed consent was obtained. - Participants were blinded to the source of the drafts. - Surveys were undertaken for every scenario/response. ### About this repo The dataset shown here is the complete stage2 parsed data with all physician edits. If you wish to see the full data for stage1 and others, please visit https://github.com/AIM-Harvard/OncQA/ ### Settings This research was conducted at the Brigham and Women’s Hospital, Boston, MA in 2023. ### Participants Six board-certified oncologists participated. ### Intervention Employment of GPT-4, an AI chatbot, for drafting responses to patient inquiries. ### Main Outcomes & Measures - Evaluate the impact and utility of an AI chatbot in assisting responses to patient messages. - Impact was determined by comparing response length and readability, using the Flesch reading ease score, and content. - Utility was ascertained through physician feedback on surveys regarding acceptability, potential harm, and efficiency of chatbot-crafted drafts. ![Workflow Diagram](z_workflow.png) ### Results - On average, manual responses were more concise than those by GPT-4 or AI-assisted (34 vs. 169 vs. 160 words, p<0.001). - Manual responses were more readable than GPT-4 or AI-assisted messages (Flesch score 67 vs. 45 vs. 46, p<0.001). - About 58% of GPT-4 drafts were immediately acceptable, with 82% posing a low risk of harm. - Utilizing the GPT-4 draft enhanced documentation efficiency in 77% of replies. - Surprisingly, 31% of GPT-4 responses were perceived to be human-written, despite being AI-generated. - 7.7% of survey responses felt unedited GPT-4 drafts could lead to severe harm or death. - Among 56 dual-annotated responses, annotation agreement was low for manual responses (Cohen's kappa 0.10), but improved for AI-assisted responses (Cohen's kappa 0.52). - AI-assistance led to differences in clinical content in the responses (p=0.001). - Manual replies were more likely to advise direct clinical actions, while GPT-4 drafts often provided educational and self-management suggestions. - AI-aided replies closely mirrored GPT-4 drafts but introduced some direct clinical actions. ### Conclusions & Relevance AI-generated chatbot responses, while lengthier and less accessible, were overall safe and improved efficiency.
AI-assistance altered the nature of physician feedback and reduced variability. AI chatbots are a promising avenue to address physician burnout and could improve patient care; however, interactions between humans and AI might affect clinical decisions in unexpected ways. Addressing these interactions is vital for the safe incorporation of such technologies. **Note**: It's imperative to delve deeper into human-AI interactions and their potential impact on outcomes. # Citation: ``` @misc{chen2023impact, title={The impact of using an AI chatbot to respond to patient messages}, author={Shan Chen and Marco Guevara and Shalini Moningi and Frank Hoebers and Hesham Elhalawani and Benjamin H. Kann and Fallon E. Chipidza and Jonathan Leeman and Hugo J. W. L. Aerts and Timothy Miller and Guergana K. Savova and Raymond H. Mak and Maryam Lustberg and Majid Afshar and Danielle S. Bitterman}, year={2023}, eprint={2310.17703}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
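The readability comparison in the results above uses the Flesch reading ease score. As a minimal sketch of how such a score can be computed (using the `textstat` package; the library choice and the sample texts are assumptions, not the authors' exact pipeline):

```python
# Illustrative sketch only: computes Flesch reading ease for two made-up draft
# replies, mirroring the readability metric reported above. The textstat
# package and the example texts are assumptions, not part of the OncQA release.
import textstat

manual_reply = "Please stop the medication and call our clinic today."
ai_reply = (
    "Thank you for reaching out. Nausea can sometimes occur with this "
    "medication; staying hydrated and eating small, frequent meals may help. "
    "If symptoms persist or worsen, please contact the clinic so we can "
    "adjust your treatment plan."
)

for name, text in [("manual", manual_reply), ("ai", ai_reply)]:
    # Higher scores indicate easier-to-read text (the card reports 67 vs. ~45).
    print(name, textstat.flesch_reading_ease(text))
```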
[ -0.14325712621212006, -0.7822430729866028, 0.6545742750167847, -0.13948217034339905, 0.05744228512048721, -0.03647534176707268, 0.05405807867646217, -0.49419549107551575, 0.05557833984494209, 0.4636591374874115, -0.5202140212059021, -0.2292354255914688, -0.6836906671524048, 0.1419873237609...
null
null
null
null
null
null
null
null
null
null
null
null
null
ajdesh2000/combined_train_dataset
ajdesh2000
2023-10-27T04:51:13Z
0
0
null
[ "region:us" ]
2023-10-27T04:51:13Z
2023-10-26T22:12:50.000Z
2023-10-26T22:12:50
--- dataset_info: features: - name: mmlu_id dtype: string - name: group_id dtype: string - name: category dtype: string - name: perturb_type dtype: string - name: split_used dtype: string - name: instruction dtype: string - name: output dtype: string - name: combined_id dtype: string - name: bbq_id dtype: string - name: is_ambiguous dtype: string - name: is_negative dtype: string - name: bb_id dtype: string - name: section dtype: string - name: task dtype: string - name: subtask dtype: string - name: org_task dtype: string - name: bb_stem_id dtype: string - name: math_id dtype: string - name: tqa_id dtype: string - name: gsm_id dtype: string - name: verbose dtype: string splits: - name: train num_bytes: 3617631 num_examples: 6548 download_size: 1503383 dataset_size: 3617631 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "combined_train_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.656489908695221, 0.005107908044010401, 0.09976444393396378, 0.3163338303565979, -0.2945636510848999, 0.10346268862485886, 0.13076330721378326, -0.18179093301296234, 0.8206993937492371, 0.38185060024261475, -0.803554356098175, -0.48288145661354065, -0.56342613697052, -0.38361790776252747...
null
null
null
null
null
null
null
null
null
null
null
null
null
marcus2000/new_sentiment
marcus2000
2023-10-27T11:45:52Z
0
0
null
[ "region:us" ]
2023-10-27T11:45:52Z
2023-10-26T22:21:23.000Z
2023-10-26T22:21:23
--- dataset_info: features: - name: text dtype: string - name: sentiment dtype: string splits: - name: train num_bytes: 9012929 num_examples: 6195 download_size: 4355943 dataset_size: 9012929 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "new_sentiment" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.8315935730934143, -0.266109436750412, 0.11059225350618362, 0.333214670419693, -0.2539852261543274, 0.030647587031126022, 0.17469044029712677, -0.0820111334323883, 1.0581367015838623, 0.4076903760433197, -0.87833571434021, -0.9123340845108032, -0.7248945832252502, -0.2941277325153351, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
ManuBansal/33param_snp500_trainingSet
ManuBansal
2023-10-27T10:02:05Z
0
0
null
[ "region:us" ]
2023-10-27T10:02:05Z
2023-10-26T22:48:58.000Z
2023-10-26T22:48:58
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622263669967651, 0.43461522459983826, -0.52829909324646, 0.7012971639633179, 0.7915719747543335, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104475975036621, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
CLMBR/mSCAN
CLMBR
2023-09-05T05:27:21Z
0
1
null
[ "license:bsd", "region:us" ]
2023-09-05T05:27:21Z
2023-10-26T23:03:04.000Z
2023-10-26T23:03:04
--- license: bsd ---
[ -0.12853392958641052, -0.18616779148578644, 0.6529127955436707, 0.49436280131340027, -0.19319361448287964, 0.23607419431209564, 0.36072003841400146, 0.050563063472509384, 0.579365611076355, 0.7400140762329102, -0.6508104205131531, -0.23783954977989197, -0.7102249264717102, -0.0478260256350...
null
null
null
null
null
null
null
null
null
null
null
null
null
thanhduycao/soict_train_dataset_filter
thanhduycao
2023-10-27T01:02:51Z
0
0
null
[ "region:us" ]
2023-10-27T01:02:51Z
2023-10-27T01:01:46.000Z
2023-10-27T01:01:46
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* dataset_info: features: - name: id dtype: string - name: sentence dtype: string - name: intent dtype: string - name: sentence_annotation dtype: string - name: entities list: - name: type dtype: string - name: filler dtype: string - name: file dtype: string - name: audio struct: - name: array sequence: float64 - name: path dtype: string - name: sampling_rate dtype: int64 - name: origin_transcription dtype: string - name: sentence_norm dtype: string - name: sentence_norm_v2 dtype: string - name: w2v2_large_transcription dtype: string - name: wer dtype: float64 splits: - name: train num_bytes: 3205296038.433596 num_examples: 6184 - name: test num_bytes: 566006350.9006286 num_examples: 1092 download_size: 902006355 dataset_size: 3771302389.3342247 --- # Dataset Card for "soict_train_dataset_filter" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.694554328918457, -0.21458078920841217, 0.2668079733848572, 0.23534752428531647, -0.32218611240386963, -0.13577410578727722, 0.25669077038764954, -0.039863139390945435, 0.7503968477249146, 0.6452674865722656, -1.0817183256149292, -0.7084898352622986, -0.5952388048171997, -0.3052372634410...
null
null
null
null
null
null
null
null
null
null
null
null
null
NissrineH/AIS-Australian_Institute_of_Sports
NissrineH
2023-10-27T01:53:38Z
0
0
null
[ "region:us" ]
2023-10-27T01:53:38Z
2023-10-27T01:38:06.000Z
2023-10-27T01:38:06
--- configs: - config_name: ais data_files: "ais.csv" sep: ";" default: true ---
[ -0.12853392958641052, -0.18616779148578644, 0.6529127955436707, 0.49436280131340027, -0.19319361448287964, 0.23607419431209564, 0.36072003841400146, 0.050563063472509384, 0.579365611076355, 0.7400140762329102, -0.6508104205131531, -0.23783954977989197, -0.7102249264717102, -0.0478260256350...
null
null
null
null
null
null
null
null
null
null
null
null
null
rmadrig/docubot0.3
rmadrig
2023-10-27T02:03:58Z
0
0
null
[ "region:us" ]
2023-10-27T02:03:58Z
2023-10-27T02:03:11.000Z
2023-10-27T02:03:11
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
hudssntao/prompt_learning_paper
hudssntao
2023-10-27T02:12:10Z
0
0
null
[ "region:us" ]
2023-10-27T02:12:10Z
2023-10-27T02:07:27.000Z
2023-10-27T02:07:27
--- dataset_info: features: - name: newColumn dtype: string - name: new_colmmn dtype: string splits: - name: train num_bytes: 80 num_examples: 6 download_size: 0 dataset_size: 80 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "prompt_learning_paper" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.52732253074646, -0.3580431044101715, 0.46519801020622253, 0.027236035093665123, 0.02936519868671894, 0.08405851572751999, 0.18482260406017303, 0.15869638323783875, 0.6773757338523865, 0.23410338163375854, -0.8190841674804688, -0.8002583384513855, -0.573523759841919, -0.23368456959724426...
null
null
null
null
null
null
null
null
null
null
null
null
null
hudssntao/test_dataset
hudssntao
2023-10-27T03:27:15Z
0
0
null
[ "region:us" ]
2023-10-27T03:27:15Z
2023-10-27T02:43:19.000Z
2023-10-27T02:43:19
--- dataset_info: features: - name: column1 dtype: string - name: column2 dtype: string splits: - name: train num_bytes: 40 num_examples: 2 download_size: 1227 dataset_size: 40 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "test_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.6507143378257751, -0.3762452006340027, 0.05513935536146164, 0.19198383390903473, -0.1418946385383606, 0.027945304289460182, 0.2564050257205963, -0.03172285109758377, 0.7297998666763306, 0.32808583974838257, -0.8236618041992188, -0.679920494556427, -0.5159001350402832, -0.185640141367912...
null
null
null
null
null
null
null
null
null
null
null
null
null
zhongshupeng/dataset_4090_2
zhongshupeng
2023-10-27T03:01:33Z
0
0
null
[ "region:us" ]
2023-10-27T03:01:33Z
2023-10-27T02:47:49.000Z
2023-10-27T02:47:49
# Disclaimer: this dataset is curated for the NeurIPS 2023 LLM efficiency challenge and is currently a work in progress. Please use at your own risk. # Data composition: All data were derived from the training-set portions of the open source datasets listed below. **gsm2k_dolly15k_cnnadd6k_mmlulog1.7w_bbqabc8k.json**: - gsm8k_2000: https://huggingface.co/datasets/gsm8k - dolly_15000: https://huggingface.co/datasets/databricks/databricks-dolly-15k - cnn_dailymail_6000: https://huggingface.co/datasets/cnn_dailymail - mmlu_17000: https://huggingface.co/datasets/cais/mmlu - bbq_8000: https://huggingface.co/datasets/tasksource/bigbench **lima_4kall.json** - lima_1000: https://huggingface.co/datasets/GAIR/lima - 3000 of gsm8k_dolly15k_cnnadd8k_mmlulog1.7w_bbqabc8k.json: https://huggingface.co/datasets/zhongshupeng/dataset_4090_1
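A rough sketch of how a mixture file like the one described above might be assembled from two of the listed sources. The sample counts and the instruction/input/output field mapping are assumptions; the author's exact sampling and schema are not documented in the card.

```python
# Hypothetical sketch: subsample two of the listed source datasets and write a
# single instruction-tuning JSON file. Counts and field names are assumptions.
import json
from datasets import load_dataset

mixture = []

# Dolly 15k already provides instruction/context/response fields.
dolly = load_dataset("databricks/databricks-dolly-15k", split="train")
for row in dolly.select(range(1000)):  # the card uses far larger slices
    mixture.append({
        "instruction": row["instruction"],
        "input": row["context"],
        "output": row["response"],
    })

# GSM8K ("main" config) provides question/answer pairs.
gsm8k = load_dataset("gsm8k", "main", split="train")
for row in gsm8k.select(range(1000)):
    mixture.append({
        "instruction": row["question"],
        "input": "",
        "output": row["answer"],
    })

with open("mixture_sample.json", "w", encoding="utf-8") as f:
    json.dump(mixture, f, ensure_ascii=False, indent=2)
```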
[ -0.34032511711120605, -0.2551673352718353, 0.14629331231117249, 0.36150819063186646, -0.20670266449451447, -0.2799280285835266, -0.08784983307123184, -0.3247692883014679, 0.14161312580108643, 0.5079676508903503, -0.7651481032371521, -0.57308429479599, -0.860714852809906, 0.2458783537149429...
null
null
null
null
null
null
null
null
null
null
null
null
null
zhongshupeng/dataset_4090_3
zhongshupeng
2023-10-27T03:04:31Z
0
0
null
[ "region:us" ]
2023-10-27T03:04:31Z
2023-10-27T03:02:27.000Z
2023-10-27T03:02:27
# Disclaimer: this dataset is curated for the NeurIPS 2023 LLM efficiency challenge and is currently a work in progress. Please use at your own risk. # Data composition: All data were derived from the training-set portions of the open source datasets listed below. **gsm2k_dolly12k_cnnadd4k_mmlulog1.7w_bbqabc8k.json**: - gsm8k_2000: https://huggingface.co/datasets/gsm8k - dolly_12000: https://huggingface.co/datasets/databricks/databricks-dolly-15k - cnn_dailymail_4000: https://huggingface.co/datasets/cnn_dailymail - mmlu_17000: https://huggingface.co/datasets/cais/mmlu - bbq_8000: https://huggingface.co/datasets/tasksource/bigbench
[ -0.18777377903461456, -0.37556037306785583, 0.10099199414253235, 0.4024302065372467, -0.2240002602338791, -0.2821134328842163, -0.054978739470243454, -0.2994988262653351, 0.1580953150987625, 0.5112112164497375, -0.7873906493186951, -0.5722082853317261, -0.8458289504051208, 0.22947658598423...
null
null
null
null
null
null
null
null
null
null
null
null
null
gaurav16/temples_dataset
gaurav16
2023-10-27T03:22:25Z
0
1
null
[ "task_categories:question-answering", "size_categories:1M<n<10M", "language:en", "license:apache-2.0", "art", "region:us" ]
2023-10-27T03:22:25Z
2023-10-27T03:15:21.000Z
2023-10-27T03:15:21
--- license: apache-2.0 task_categories: - question-answering language: - en tags: - art size_categories: - 1M<n<10M --- # Dataset Card for the Indian Temple Destruction Dataset ## Dataset Details ### Dataset Description The Indian Temple Destruction Dataset provides information about historical temples that were destroyed in India, including details on the locations of these temples, the entities responsible for their destruction, and contact information for inquiries. - **Curated by:** Gaurav Sinha - **Funded by [optional]:** [Information Not Available] - **Shared by [optional]:** [Information Not Available] - **Language(s) (NLP):** English - **License:** [Information Not Available] ### Dataset Sources [optional] - **Repository:** [Link to the dataset repository] - **Paper [optional]:** [Link to any associated research paper] - **Demo [optional]:** [Link to a demo or usage example] ## Uses ### Direct Use This dataset can be used for historical research, cultural preservation efforts, and to understand the history of temple destruction in India. ### Out-of-Scope Use Misuse of this dataset for promoting hatred, violence, or discrimination is strictly out of scope. ## Dataset Structure [Information Not Available] ## Dataset Creation ### Curation Rationale The dataset was created to document historical information about the destruction of temples in India for research, education, and preservation purposes. It includes data from books authored by Sir Sita Ram Goel and contributions by Gaurav Sinha. ### Source Data #### Data Collection and Processing The data for this dataset was collected from historical records, scholarly research, and reputable sources, including books authored by Sir Sita Ram Goel. It also includes contributions by Gaurav Sinha. The dataset was carefully compiled to provide accurate and valuable information. #### Who are the source data producers? The source data was produced by historians, researchers, and scholars, including Sir Sita Ram Goel, who documented the destruction of temples in India. Contributions by Gaurav Sinha are also part of the dataset. ### Annotations [optional] [Information Not Available] ## Bias, Risks, and Limitations This dataset may contain historical events that could be sensitive to some communities. It is essential to use this data responsibly and with cultural sensitivity. ### Recommendations Users should exercise caution when using this dataset to ensure that it is used for educational and research purposes and not for promoting hatred or discrimination. ## Citation [optional] **BibTeX:** [Information Not Available] **APA:** [Information Not Available] ## Glossary [optional] [Information Not Available] ## More Information [optional] [Information Not Available] ## Dataset Card Authors [optional] Gaurav Sinha ## Dataset Card Contact For inquiries related to this dataset, please contact [Your Email Address].
[ -0.2815943956375122, -0.7278581261634827, -0.0652102679014206, -0.2408096343278885, -0.212367981672287, -0.2657972276210785, -0.042629484087228775, -0.18901734054088593, 0.208668053150177, 0.5147459506988525, -0.3436063528060913, -0.2523821294307709, -0.486144483089447, -0.0762684941291809...
null
null
null
null
null
null
null
null
null
null
null
null
null
hudssntao/test2
hudssntao
2023-10-30T05:11:50Z
0
0
null
[ "region:us" ]
2023-10-30T05:11:50Z
2023-10-27T03:32:52.000Z
2023-10-27T03:32:52
--- dataset_info: features: - name: column1 dtype: string - name: column2 dtype: string splits: - name: train num_bytes: 100 num_examples: 5 download_size: 1255 dataset_size: 100 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "test2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.4582540988922119, -0.2834520936012268, 0.10296005010604858, 0.17352311313152313, -0.11084074527025223, -0.05248580873012543, 0.3387722969055176, -0.24119366705417633, 0.5015091300010681, 0.23152005672454834, -0.7243699431419373, -0.5032135844230652, -0.592483401298523, -0.32758662104606...
null
null
null
null
null
null
null
null
null
null
null
null
null
GalacticV/Aria_3
GalacticV
2023-10-27T03:45:47Z
0
0
null
[ "license:openrail", "region:us" ]
2023-10-27T03:45:47Z
2023-10-27T03:45:19.000Z
2023-10-27T03:45:19
--- license: openrail ---
[ -0.1285335123538971, -0.1861683875322342, 0.6529128551483154, 0.49436232447624207, -0.19319400191307068, 0.23607441782951355, 0.36072009801864624, 0.05056373029947281, 0.5793656706809998, 0.7400146722793579, -0.650810182094574, -0.23784008622169495, -0.7102247476577759, -0.0478255338966846...
null
null
null
null
null
null
null
null
null
null
null
null
null
Deojoandco/capstone_hal_without_gold
Deojoandco
2023-10-27T04:03:05Z
0
0
null
[ "region:us" ]
2023-10-27T04:03:05Z
2023-10-27T04:02:56.000Z
2023-10-27T04:02:56
--- dataset_info: features: - name: dialog_id dtype: int32 - name: source sequence: string - name: tags sequence: class_label: names: '0': C '1': M '2': N '3': O '4': OB '5': W splits: - name: train num_bytes: 239933 num_examples: 76 - name: validation num_bytes: 47958 num_examples: 12 - name: test num_bytes: 27286 num_examples: 12 download_size: 35488 dataset_size: 315177 configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* - split: test path: data/test-* --- # Dataset Card for "capstone_hal_without_gold" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.605787992477417, -0.1554795354604721, 0.25844693183898926, 0.14861024916172028, -0.2806704044342041, 0.12146967649459839, 0.10940679162740707, 0.012198840267956257, 0.802009105682373, 0.7608605623245239, -1.0439482927322388, -1.06490957736969, -0.6064119935035706, -0.32288751006126404, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
Deojoandco/capstone_hal_with_gold
Deojoandco
2023-10-27T04:03:41Z
0
0
null
[ "region:us" ]
2023-10-27T04:03:41Z
2023-10-27T04:03:31.000Z
2023-10-27T04:03:31
--- dataset_info: features: - name: dialog_id dtype: int32 - name: source sequence: string - name: tags sequence: class_label: names: '0': C '1': M '2': N '3': O '4': OB '5': W splits: - name: train num_bytes: 268989 num_examples: 76 - name: validation num_bytes: 53862 num_examples: 12 - name: test num_bytes: 31570 num_examples: 12 download_size: 39058 dataset_size: 354421 configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* - split: test path: data/test-* --- # Dataset Card for "capstone_hal_with_gold" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.5391637682914734, -0.16037903726100922, 0.18673206865787506, 0.21095812320709229, -0.24163644015789032, 0.16723419725894928, 0.133174329996109, 0.02422843687236309, 0.72165447473526, 0.7122332453727722, -0.9561489224433899, -1.0192598104476929, -0.6433219909667969, -0.35375458002090454,...
null
null
null
null
null
null
null
null
null
null
null
null
null
ycchen/oasst_lima_arc
ycchen
2023-10-27T04:18:07Z
0
0
null
[ "region:us" ]
2023-10-27T04:18:07Z
2023-10-27T04:12:45.000Z
2023-10-27T04:12:45
--- dataset_info: features: - name: conversations sequence: string - name: source dtype: string splits: - name: train num_bytes: 8102880 num_examples: 4970 download_size: 4569911 dataset_size: 8102880 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "oasst_lima_arc" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.4864670932292938, -0.36508870124816895, 0.26985153555870056, 0.2524107098579407, -0.4499008059501648, -0.07223482429981232, 0.557105302810669, -0.2534183859825134, 1.0505040884017944, 0.6378107070922852, -0.753572404384613, -0.8424663543701172, -0.7790226936340332, -0.3178056478500366, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
hsiungc/midi_preprocess
hsiungc
2023-10-27T04:22:32Z
0
0
null
[ "region:us" ]
2023-10-27T04:22:32Z
2023-10-27T04:21:04.000Z
2023-10-27T04:21:04
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
atmallen/quirky_math_bob_grader_last_1.0e_0.0p_finetuning
atmallen
2023-10-27T04:33:48Z
0
0
null
[ "region:us" ]
2023-10-27T04:33:48Z
2023-10-27T04:33:46.000Z
2023-10-27T04:33:46
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* - split: test path: data/test-* dataset_info: features: - name: statement dtype: string - name: choices sequence: string - name: label dtype: class_label: names: '0': 'False' '1': 'True' - name: true_label dtype: bool splits: - name: train num_bytes: 11540623 num_examples: 200000 - name: validation num_bytes: 1159427 num_examples: 20000 - name: test num_bytes: 1159757 num_examples: 20000 download_size: 3315827 dataset_size: 13859807 --- # Dataset Card for "quirky_math_bob_grader_last_1.0e_0.0p_finetuning" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.5353923439979553, -0.4462967813014984, 0.04909525811672211, 0.1990516036748886, -0.08364056795835495, 0.13611921668052673, 0.2616478502750397, 0.21452173590660095, 0.8046326041221619, 0.3691927194595337, -0.6812226176261902, -0.7429847717285156, -0.46506789326667786, -0.3756935298442840...
null
null
null
null
null
null
null
null
null
null
null
null
null
ajdesh2000/pegasus_combined_general_train_dataset
ajdesh2000
2023-10-27T04:45:15Z
0
0
null
[ "region:us" ]
2023-10-27T04:45:15Z
2023-10-27T04:45:12.000Z
2023-10-27T04:45:12
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
ajdesh2000/pegasus_cnn_train_dataset
ajdesh2000
2023-10-27T04:46:12Z
0
0
null
[ "region:us" ]
2023-10-27T04:46:12Z
2023-10-27T04:45:59.000Z
2023-10-27T04:45:59
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
ajdesh2000/pegasus_combined_math_train_dataset
ajdesh2000
2023-10-27T04:46:29Z
0
0
null
[ "region:us" ]
2023-10-27T04:46:29Z
2023-10-27T04:46:27.000Z
2023-10-27T04:46:27
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
ajdesh2000/combined_train_dataset_v2
ajdesh2000
2023-10-27T04:52:02Z
0
0
null
[ "region:us" ]
2023-10-27T04:52:02Z
2023-10-27T04:52:01.000Z
2023-10-27T04:52:01
Entry not found
[ -0.3227649927139282, -0.225684255361557, 0.862226128578186, 0.43461498618125916, -0.5282987952232361, 0.7012963891029358, 0.7915717363357544, 0.07618629932403564, 0.7746025919914246, 0.2563219666481018, -0.7852816581726074, -0.2257382869720459, -0.9104480743408203, 0.5715669393539429, -0...
null
null
null
null
null
null
null
null
null
null
null
null
null
anonymouse03052002/kan-ds-mini
anonymouse03052002
2023-10-27T06:34:59Z
0
0
null
[ "region:us" ]
2023-10-27T06:34:59Z
2023-10-27T04:54:12.000Z
2023-10-27T04:54:12
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* dataset_info: features: - name: text dtype: string - name: text_length dtype: int64 splits: - name: train num_bytes: 49353.04 num_examples: 88 - name: validation num_bytes: 5608.3 num_examples: 10 download_size: 0 dataset_size: 54961.340000000004 --- # Dataset Card for "kan-ds-mini" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.6755123734474182, -0.3698708415031433, 0.4359018802642822, -0.0411810576915741, -0.3280141353607178, 0.14830709993839264, 0.33652472496032715, -0.13088572025299072, 1.1562507152557373, 0.37982580065727234, -1.110518217086792, -0.7078540325164795, -0.5961225032806396, -0.1611413359642028...
null
null
null
null
null
null
null
null
null
null
null
null
null
Raspado/akashicruz
Raspado
2023-10-27T04:59:09Z
0
0
null
[ "region:us" ]
2023-10-27T04:59:09Z
2023-10-27T04:58:48.000Z
2023-10-27T04:58:48
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
ajdesh2000/exodus_combined_general_train_dataset_v2
ajdesh2000
2023-10-27T05:15:33Z
0
0
null
[ "region:us" ]
2023-10-27T05:15:33Z
2023-10-27T05:15:31.000Z
2023-10-27T05:15:31
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
tianyang/repo_dedup_sep2023
tianyang
2023-10-27T05:26:00Z
0
0
null
[ "region:us" ]
2023-10-27T05:26:00Z
2023-10-27T05:21:30.000Z
2023-10-27T05:21:30
--- dataset_info: features: - name: repo_name dtype: string - name: language dtype: string - name: created_at dtype: timestamp[ns] - name: license dtype: string - name: description dtype: string - name: stars dtype: int64 - name: forks dtype: int64 - name: url dtype: string - name: repo_code list: - name: code dtype: string - name: path dtype: string - name: repo_name dtype: string - name: size dtype: int64 splits: - name: train num_bytes: 219555370 num_examples: 1474 download_size: 71458940 dataset_size: 219555370 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "repo_dedup_sep2023" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.6491106152534485, 0.015168366022408009, 0.15312829613685608, 0.30156251788139343, -0.4750048518180847, -0.0459556020796299, 0.35508209466934204, -0.06410972028970718, 0.778992772102356, 0.9242304563522339, -0.767052173614502, -0.7398926019668579, -0.5733391642570496, 0.08256310224533081...
null
null
null
null
null
null
null
null
null
null
null
null
null
ayushtues/scalecrafter
ayushtues
2023-10-27T06:35:59Z
0
0
null
[ "region:us" ]
2023-10-27T06:35:59Z
2023-10-27T05:32:20.000Z
2023-10-27T05:32:20
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
anlp/anno1_w_elimination
anlp
2023-10-27T05:53:10Z
0
0
null
[ "region:us" ]
2023-10-27T05:53:10Z
2023-10-27T05:53:09.000Z
2023-10-27T05:53:09
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: sentences sequence: string - name: ner_tags sequence: string splits: - name: train num_bytes: 1239484 num_examples: 917 download_size: 249472 dataset_size: 1239484 --- # Dataset Card for "anno1_w_elimination" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.8156099319458008, -0.5207178592681885, 0.18735156953334808, 0.0334324985742569, -0.3329024314880371, -0.21352368593215942, 0.1579374223947525, -0.285712331533432, 0.9137885570526123, 0.5406435132026672, -1.088383674621582, -0.7901684045791626, -0.6470345258712769, 0.05946199595928192, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
fia24/filtered_lemma41kV0.0.2
fia24
2023-10-27T06:08:22Z
0
0
null
[ "region:us" ]
2023-10-27T06:08:22Z
2023-10-27T06:08:14.000Z
2023-10-27T06:08:14
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* - split: val path: data/val-* dataset_info: features: - name: 'Unnamed: 0' dtype: int64 - name: Inflected_Word dtype: string - name: Lemma dtype: string splits: - name: train num_bytes: 1794357.4941723635 num_examples: 28553 - name: test num_bytes: 224349.67443684858 num_examples: 3570 - name: val num_bytes: 224286.83139078785 num_examples: 3569 download_size: 1201505 dataset_size: 2242994.0 --- # Dataset Card for "filtered_lemma41kV0.0.2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.7807761430740356, -0.07682950794696808, 0.2576483488082886, 0.12355754524469376, -0.6665325164794922, -0.2185286283493042, 0.29988953471183777, -0.12817822396755219, 0.5937254428863525, 0.9433826208114624, -0.8829838037490845, -0.7138899564743042, -0.7345120310783386, -0.356464207172393...
null
null
null
null
null
null
null
null
null
null
null
null
null
anonymouse03052002/final
anonymouse03052002
2023-10-27T06:19:06Z
0
0
null
[ "region:us" ]
2023-10-27T06:19:06Z
2023-10-27T06:16:20.000Z
2023-10-27T06:16:20
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
Adminhuggingface/LORA_ONE_DATA
Adminhuggingface
2023-10-27T06:18:33Z
0
0
null
[ "region:us" ]
2023-10-27T06:18:33Z
2023-10-27T06:18:32.000Z
2023-10-27T06:18:32
--- dataset_info: features: - name: image dtype: image - name: text dtype: string splits: - name: train num_bytes: 2493084.0 num_examples: 6 download_size: 2495157 dataset_size: 2493084.0 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "LORA_ONE_DATA" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.6144790649414062, -0.5636203289031982, 0.10654405504465103, 0.20722703635692596, -0.3459134101867676, -0.23514814674854279, 0.5067978501319885, -0.1451525241136551, 1.1996934413909912, 0.8216912746429443, -0.8607468008995056, -0.9316003322601318, -0.5291064381599426, -0.4263136088848114...
null
null
null
null
null
null
null
null
null
null
null
null
null
yuyijiong/Chinese_Paper_QA
yuyijiong
2023-11-21T05:56:27Z
0
4
null
[ "size_categories:1K<n<10K", "language:zh", "license:cc-by-nc-4.0", "region:us" ]
2023-11-21T05:56:27Z
2023-10-27T06:26:05.000Z
2023-10-27T06:26:05
--- license: cc-by-nc-4.0 language: - zh size_categories: - 1K<n<10K --- # Chinese Paper QA Dataset * The paper data comes from CNKI and is copyright-restricted, so it cannot be made directly public. Please do not upload it anywhere public after downloading. * It covers two tasks: writing abstracts for papers and question answering based on paper content. The abstract task has been moved to the [paper abstract dataset](https://huggingface.co/datasets/yuyijiong/Chinese_Paper_Abstract/settings). ## Improved version * Longer papers were selected from this dataset and multiple tasks were designed for each paper, forming a new dataset: [Chinese paper multi-task dataset](https://huggingface.co/datasets/yuyijiong/Paper_mutli_QA_Chinese)
[ -0.23716020584106445, -0.6736099123954773, 0.1382209211587906, 0.9450911283493042, -0.5760934948921204, -0.2784157991409302, 0.08401540666818619, -0.6131497621536255, 0.5810509324073792, 0.5054718852043152, -0.09770719707012177, -0.5832504630088806, -0.4903348684310913, 0.2172250598669052,...
null
null
null
null
null
null
null
null
null
null
null
null
null
Facico/test2
Facico
2023-10-27T06:48:29Z
0
0
null
[ "region:us" ]
2023-10-27T06:48:29Z
2023-10-27T06:33:47.000Z
2023-10-27T06:33:47
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
Facico/test3
Facico
2023-10-27T06:49:10Z
0
0
null
[ "region:us" ]
2023-10-27T06:49:10Z
2023-10-27T06:47:58.000Z
2023-10-27T06:47:58
Entry not found
[ -0.32276472449302673, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965679168701, 0.7915717363357544, 0.07618629932403564, 0.7746022939682007, 0.2563222646713257, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
SergioSCA/Structures
SergioSCA
2023-10-27T07:07:46Z
0
0
null
[ "license:apache-2.0", "region:us" ]
2023-10-27T07:07:46Z
2023-10-27T07:06:26.000Z
2023-10-27T07:06:26
--- license: apache-2.0 --- # Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. 
--> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
[ -0.5322356224060059, -0.5534716844558716, 0.1290130317211151, 0.23470577597618103, -0.39626216888427734, -0.11762470006942749, -0.03545305132865906, -0.6389272212982178, 0.5699822306632996, 0.7838326692581177, -0.7834625840187073, -0.9173274040222168, -0.55633145570755, 0.13078093528747559...
null
null
null
null
null
null
null
null
null
null
null
null
null
sanak/bergains_conversatation
sanak
2023-10-27T08:05:05Z
0
0
null
[ "license:apache-2.0", "region:us" ]
2023-10-27T08:05:05Z
2023-10-27T08:03:15.000Z
2023-10-27T08:03:15
--- license: apache-2.0 ---
[ -0.1285335123538971, -0.1861683875322342, 0.6529128551483154, 0.49436232447624207, -0.19319400191307068, 0.23607441782951355, 0.36072009801864624, 0.05056373029947281, 0.5793656706809998, 0.7400146722793579, -0.650810182094574, -0.23784008622169495, -0.7102247476577759, -0.0478255338966846...
null
null
null
null
null
null
null
null
null
null
null
null
null
anlp/relabel_SciERC
anlp
2023-10-27T18:37:16Z
0
0
null
[ "region:us" ]
2023-10-27T18:37:16Z
2023-10-27T08:13:15.000Z
2023-10-27T08:13:15
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: sentences sequence: string - name: ner_tags sequence: string - name: predict sequence: string - name: new_gt sequence: string splits: - name: train num_bytes: 2267323 num_examples: 3238 download_size: 312123 dataset_size: 2267323 --- # Dataset Card for "relabel_SciERC" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.40777233242988586, -0.13727444410324097, 0.14870765805244446, 0.19577175378799438, -0.10280448198318481, 0.26819801330566406, 0.3310016691684723, -0.1903052031993866, 1.1033525466918945, 0.23379307985305786, -0.8441823124885559, -0.9980806708335876, -0.6962820887565613, -0.0849573686718...
null
null
null
null
null
null
null
null
null
null
null
null
null
anlp/sentence_w_elimination
anlp
2023-10-27T08:43:53Z
0
0
null
[ "region:us" ]
2023-10-27T08:43:53Z
2023-10-27T08:22:29.000Z
2023-10-27T08:22:29
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: sentences sequence: string - name: new_gt sequence: string splits: - name: train num_bytes: 1201528 num_examples: 990 download_size: 244599 dataset_size: 1201528 --- # Dataset Card for "sentence_w_elimination" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.4536062180995941, -0.672946572303772, 0.37418338656425476, 0.10779257863759995, -0.09167028218507767, -0.23970523476600647, -0.18801642954349518, -0.07656294107437134, 0.728037416934967, 0.5249900817871094, -0.9946773648262024, -0.7989006042480469, -0.7362025380134583, -0.03986965119838...
null
null
null
null
null
null
null
null
null
null
null
null
null
AudioDecBenchmark/superb_ks
AudioDecBenchmark
2023-10-27T08:43:49Z
0
0
null
[ "region:us" ]
2023-10-27T08:43:49Z
2023-10-27T08:42:59.000Z
2023-10-27T08:42:59
--- configs: - config_name: default data_files: - split: original path: data/original-* - split: descript_audio_codec path: data/descript_audio_codec-* - split: encodec_hf path: data/encodec_hf-* - split: speech_tokenizer path: data/speech_tokenizer-* dataset_info: features: - name: audio dtype: audio: sampling_rate: 16000 - name: id dtype: string splits: - name: original num_bytes: 98824867.676 num_examples: 3081 - name: descript_audio_codec num_bytes: 272081821.676 num_examples: 3081 - name: encodec_hf num_bytes: 148225621.676 num_examples: 3081 - name: speech_tokenizer num_bytes: 98929621.676 num_examples: 3081 download_size: 544447448 dataset_size: 618061932.704 --- # Dataset Card for "superb_ks" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.6468480229377747, -0.17227262258529663, 0.37695661187171936, 0.11557427048683167, -0.4018844962120056, 0.08160924166440964, 0.4733448028564453, -0.11718341708183289, 0.9612210392951965, 0.5792251229286194, -0.9466887712478638, -0.8612930178642273, -0.6601135730743408, -0.210449039936065...
null
null
null
null
null
null
null
null
null
null
null
null
null
anlp/sentence_anno
anlp
2023-10-27T08:49:02Z
0
0
null
[ "region:us" ]
2023-10-27T08:49:02Z
2023-10-27T08:49:00.000Z
2023-10-27T08:49:00
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: sentences sequence: string - name: new_gt sequence: string splits: - name: train num_bytes: 1201528 num_examples: 990 download_size: 244599 dataset_size: 1201528 --- # Dataset Card for "sentence_anno" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.4229704439640045, -0.5876678824424744, 0.2536723017692566, 0.4034993052482605, -0.1844683736562729, -0.43831270933151245, -0.1497698426246643, -0.3895839750766754, 0.7839658260345459, 0.6945579648017883, -0.7737098932266235, -0.7382442951202393, -0.6158249378204346, 0.1062358021736145, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
ManuBansal/33param_snp500_validationSet
ManuBansal
2023-10-27T09:17:41Z
0
0
null
[ "region:us" ]
2023-10-27T09:17:41Z
2023-10-27T09:16:52.000Z
2023-10-27T09:16:52
Entry not found
[ -0.3227645754814148, -0.22568479180335999, 0.8622263669967651, 0.43461522459983826, -0.52829909324646, 0.7012971639633179, 0.7915719747543335, 0.07618614286184311, 0.774603009223938, 0.2563217282295227, -0.7852813005447388, -0.22573819756507874, -0.9104475975036621, 0.5715674161911011, -...
null
null
null
null
null
null
null
null
null
null
null
null
null
MrDontKnowWhatToDo/Intent_Recognition
MrDontKnowWhatToDo
2023-10-27T11:01:55Z
0
0
null
[ "region:us" ]
2023-10-27T11:01:55Z
2023-10-27T09:19:22.000Z
2023-10-27T09:19:22
{ "intents": [ { "tag": "busy", "pattern": [ "बाहर हु", "व्यस्त हु", "बिजी हु", "बात नई कर सकता", "व्यस्त", "बिजी हूँ", "व्यस्त हूँ", "मीटिंग में हूँ", "अभी समय नहीं है", "बात नहीं कर सकते", "वर्तमान में कोई फ्री समय नहीं", "डेडलाइन पर ध्यान केंद्रित", "पूरी तरह व्यस्त, कोई विश्राम नहीं", "एक के बाद एक मीटिंगों में व्यस्त", "रात-दिन काम कर रहा हूँ", "काम में उलझ रहा हूँ", "बहुत काम बचा है", "समय की कमी है", "वापस कॉल करो", "रविवार को कॉल करो", "बात नई कर सकता", "कॉल करो", "बदमे कॉल करो", "थोड़ी देर बाद कॉल करो", "अभी बात नई कर सकता", "नहीं" ], "response": [ "यह कॉल आपके लिए महत्वपूर्ण है। इसे नजरंदाज करने से आपके सीबील स्कोर पर नकारात्मक प्रभाव पड़ेगा" ] }, { "tag": "hi", "pattern": [ "हैलो", "हई", "हेलो", "हेल", "नमस्ते", "नमस्कार", "हैलो", "हई", "नमस्ते", "हेलो", "हाइ" ], "response": [ "नमस्कार , सोनाटा फाइनैन्स से मे स्वरा बोल रही हु। हमने <client_name> के लोन की राशी लौटाने के संधरभ मे कॉल किया है। यह कॉल गुणवत्ता एवं ट्रैनिंग केलिए रेकॉर्ड हो राहा है" ] }, { "tag": "agent visit date", "pattern": [ "कल भेज दो", "परसों भेज दो", "दो दिन बाद भेजो", "एक", "दो", "तीन", "चार", "पांच", "छे", "सात", "आठ", "जानेवारी", "फेब्रुएरी", "मार्च", "एप्रिल", "मे", "जून", "जुलाई", "अउगेस्ट", "सप्टेंबर", "ऑक्टोबर", "नवंबर", "दिसम्बर" ], "response": [ "धन्यवाद, हमारा एजेंट आपसे <callback_time> पर संपर्क करेगा" ] }, { "tag": "Refuse to Pay", "pattern": [ "नई दे सकता", "पैसे नई है", "नई दे पाऊँगा", "पैसे नई है", "नई", "मजबूरी है", "पैसे काम है", "प्रयास कर रहा हु", "नई दे सकता", "ना", "नहीं", "नही", "नहीं पाऊंगा", "नहीं कर सकता", "मत भेजो", "भेजना मत" ], "response": [ "हम समजते है की आपको कठिनाई है । लेकिन भूकतान जमा करने मे किसी भी तरीके की देरी आपका सीबील स्कोर खराब कर सकती है । जिससे आपको भविष्य मे लोन मिलने मे कठिनाई होंगी" ] }, { "tag": "miscellaneous question", "pattern": [ "कोनसा लॉन", "क्या लोन", "मैंने कब लोन लिया", "किस लोन की बात कर रहे हो", "मैंने कोई लोन नई लिया", "किसका मेम", "किस चीज का पैसा मेम" ], "response": [ "ये कॉल आपने जो सोनाटा के साथ लोन लिया था उसके संबंध मे किया गया है । आपकी पिछली <due_installments> किश्त और <emi_amount> रुपए बकाया है।" ] }, { "tag": "Agree to Pay", "pattern": [ "ठीक है", "दे दूंगा", "आजाओ", "दे सकता हु", "दे दूंगा", "हा कर दूंगा", "कर दूंगा", "कर देता हु", "हाँ", "हा कर दूंगा", "कर दूंगा", "आज अजाऊँगा ब्रांच", "ब्रांच मे अजाऊँगा", "का आ जाऊंगा", "जी", "जी बिल्कुल कर देंगे", "जी कर देंगे आज", "हाँ कर देंगे", "ठीक है", "हा कर सकते हैं", "जी कर सकते हैं", "आज नई हो पाएगा", "आज नई हो सकता", "आज बिजी हु", "कल करता हु", "आज मुश्किल है", "आज नहीं हो पाएगा कल कर देंगे", "नहीं हो पाएगा जी आज", "कल देंगे कल" ], "response": [ "कृपया आप <emi_amount> तैयार रखें हमारा एजेंट पैसे लेने के लिए आपके पास आएगा,धन्यवाद." ] }, { "tag": "insist to pay tomorrow", "pattern": [ "बाद मे देता हु", "नहीं कर सकता", "नई कर", "नहीं", "नहीं दे सकता", "आज नहीं हो पाएगा", "नहीं कर पाऊंगा", "नई कर सकता", "नई हो पाएगा", "नई हो पाएगा", "नहीं" ], "response": [ "कृपया आप <emi_amount> तैयार रखें हमारा एजेंट पैसे लेने के लिए आपके पास आएगा,धन्यवाद." ] } ] }
[ -0.547844648361206, -0.619499146938324, 0.2045697271823883, 0.2567000389099121, -0.5999494791030884, 0.060249630361795425, 0.2595105469226837, -0.30200818181037903, 0.5205317139625549, 0.4797389805316925, -0.8007798790931702, -0.30457568168640137, -0.5124197006225586, 0.14243464171886444, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
fia24/filtered_lemma41kV0.0.3
fia24
2023-10-27T10:04:32Z
0
0
null
[ "region:us" ]
2023-10-27T10:04:32Z
2023-10-27T10:04:26.000Z
2023-10-27T10:04:26
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* - split: val path: data/val-* dataset_info: features: - name: 'Unnamed: 0' dtype: int64 - name: Inflected_Word dtype: string - name: Lemma dtype: string splits: - name: train num_bytes: 1789172.8 num_examples: 28460 - name: test num_bytes: 223678.03311314125 num_examples: 3558 - name: val num_bytes: 223615.16688685876 num_examples: 3557 download_size: 1196489 dataset_size: 2236466.0 --- # Dataset Card for "filtered_lemma41kV0.0.3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
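A hedged usage sketch for the splits listed above, using the `datasets` library. The column names come from the card; accessibility of the repository as a public dataset is an assumption.

```python
from datasets import load_dataset

# Sketch only: assumes "fia24/filtered_lemma41kV0.0.3" is a publicly readable dataset repo.
ds = load_dataset("fia24/filtered_lemma41kV0.0.3")

print(ds)  # expected splits per the card: train, test, val
for row in ds["train"].select(range(3)):
    print(row["Inflected_Word"], "->", row["Lemma"])
```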
[ -0.878312349319458, -0.07037398964166641, 0.3704755902290344, 0.1552964299917221, -0.616121768951416, -0.25180181860923767, 0.3484106957912445, -0.12381936609745026, 0.6283587217330933, 0.9797435402870178, -0.8986760973930359, -0.8071246147155762, -0.6424158215522766, -0.22542324662208557,...
null
null
null
null
null
null
null
null
null
null
null
null
null
makram93/accepted_pairs_50
makram93
2023-10-27T12:18:50Z
0
0
null
[ "region:us" ]
2023-10-27T12:18:50Z
2023-10-27T10:15:10.000Z
2023-10-27T10:15:10
--- dataset_info: features: - name: url dtype: string - name: doc_id dtype: string - name: original_title sequence: string - name: right dtype: string - name: left dtype: string splits: - name: train num_bytes: 88447.0623234648 num_examples: 100 download_size: 78941 dataset_size: 88447.0623234648 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "accepted_pairs_50" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.6784986257553101, -0.05580506846308708, 0.2806207537651062, 0.637488842010498, -0.2993049621582031, -0.001294652814976871, 0.19931598007678986, -0.12876352667808533, 0.7121413350105286, 0.3591538071632385, -0.6462562680244446, -0.8087993264198303, -0.5901947021484375, 0.0420612134039402...
null
null
null
null
null
null
null
null
null
null
null
null
null
makram93/rejected_pairs_50
makram93
2023-10-27T12:18:53Z
0
0
null
[ "region:us" ]
2023-10-27T12:18:53Z
2023-10-27T10:15:13.000Z
2023-10-27T10:15:13
--- dataset_info: features: - name: url dtype: string - name: doc_id dtype: string - name: original_title sequence: string - name: right dtype: string - name: left dtype: string splits: - name: train num_bytes: 88447.0623234648 num_examples: 100 download_size: 85583 dataset_size: 88447.0623234648 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "rejected_pairs_50" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.6542280912399292, -0.22231179475784302, 0.22475633025169373, 0.5518919825553894, -0.27039170265197754, 0.08694721758365631, 0.30336835980415344, 0.14966906607151031, 0.6973177194595337, 0.4950818717479706, -0.9095996618270874, -0.7431848645210266, -0.46743226051330566, 0.103414706885814...
null
null
null
null
null
null
null
null
null
null
null
null
null
rohit901/nlp_proj_llm_hallucination
rohit901
2023-10-27T11:58:09Z
0
0
null
[ "region:us" ]
2023-10-27T11:58:09Z
2023-10-27T10:49:03.000Z
2023-10-27T10:49:03
--- configs: - config_name: default data_files: - split: gpt3_5 path: "gpt3_5_hallucination.json" - split: gpt4 path: "gpt4_hallucination.json" ---
[ -0.12853392958641052, -0.18616779148578644, 0.6529127955436707, 0.49436280131340027, -0.19319361448287964, 0.23607419431209564, 0.36072003841400146, 0.050563063472509384, 0.579365611076355, 0.7400140762329102, -0.6508104205131531, -0.23783954977989197, -0.7102249264717102, -0.0478260256350...
null
null
null
null
null
null
null
null
null
null
null
null
null
cellfabrik/algae
cellfabrik
2023-10-27T11:10:28Z
0
0
null
[ "license:apache-2.0", "region:us" ]
2023-10-27T11:10:28Z
2023-10-27T11:08:53.000Z
2023-10-27T11:08:53
--- license: apache-2.0 ---
[ -0.12853392958641052, -0.18616779148578644, 0.6529127955436707, 0.49436280131340027, -0.19319361448287964, 0.23607419431209564, 0.36072003841400146, 0.050563063472509384, 0.579365611076355, 0.7400140762329102, -0.6508104205131531, -0.23783954977989197, -0.7102249264717102, -0.0478260256350...
null
null
null
null
null
null
null
null
null
null
null
null
null
omgbobbyg/Zip-Code-to-Timezone
omgbobbyg
2023-10-27T11:29:34Z
0
0
null
[ "region:us" ]
2023-10-27T11:29:34Z
2023-10-27T11:14:09.000Z
2023-10-27T11:14:09
--- # For reference on dataset card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1 # Doc / guide: https://huggingface.co/docs/hub/datasets-cards {} --- # Dataset Card for Zip Code to Timezone Offset Mapping <!-- Provide a quick summary of the dataset. --> This dataset maps Zip Codes and Postal Codes for the USA and Canada to the relevant timezone offset. ## Dataset Details ### Dataset Description In addition to mapping each Zip Code or Postal Code to its standard timezone offset, the dataset also contains the DST offset (where DST is observed). - **Curated by:** Bobby Gill, BlueLabel ### Acknowledgements <!-- Provide the basic links for the dataset. --> - **Based on the work here:** [https://www.kaggle.com/datasets/joeleichter/us-zip-codes-with-lat-and-long]
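A small illustrative lookup for a mapping like this one. The card does not specify the exact schema, so the CSV file name and the column names `zip_code`, `utc_offset`, and `dst_utc_offset` are hypothetical.

```python
import csv

# Hypothetical schema: column names are assumptions, not taken from the dataset card.
def load_offsets(path="zip_to_timezone.csv"):
    with open(path, newline="", encoding="utf-8") as f:
        return {row["zip_code"]: row for row in csv.DictReader(f)}

offsets = load_offsets()
row = offsets.get("90210")
if row:
    print(row["utc_offset"], row.get("dst_utc_offset"))
```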
[ -0.3051850199699402, -0.1368466019630432, 0.3812446892261505, 0.44428542256355286, -0.5207868814468384, 0.16188406944274902, 0.11151707917451859, -0.5652583241462708, 0.6409721374511719, 0.5932638049125671, -0.7575901746749878, -1.0192091464996338, -0.4654953181743622, -0.1968892365694046,...
null
null
null
null
null
null
null
null
null
null
null
null
null
capjamesg/taylor-swift-records
capjamesg
2023-10-27T11:37:16Z
0
1
null
[ "license:mit", "region:us" ]
2023-10-27T11:37:16Z
2023-10-27T11:34:24.000Z
2023-10-27T11:34:24
--- license: mit ---
[ -0.12853392958641052, -0.18616779148578644, 0.6529127955436707, 0.49436280131340027, -0.19319361448287964, 0.23607419431209564, 0.36072003841400146, 0.050563063472509384, 0.579365611076355, 0.7400140762329102, -0.6508104205131531, -0.23783954977989197, -0.7102249264717102, -0.0478260256350...
null
null
null
null
null
null
null
null
null
null
null
null
null
qgyd2021/writing_a_novel
qgyd2021
2023-10-27T12:00:25Z
0
0
null
[ "task_categories:question-answering", "task_categories:text-generation", "task_categories:text2text-generation", "size_categories:100M<n<1B", "language:zh", "license:apache-2.0", "region:us" ]
2023-10-27T12:00:25Z
2023-10-27T11:47:10.000Z
2023-10-27T11:47:10
--- license: apache-2.0 task_categories: - question-answering - text-generation - text2text-generation language: - zh size_categories: - 100M<n<1B --- ## Writing a Novel Building the dataset: (1) Download novels. (2) Use a large language model to reformat each novel into one sentence per line. (3) Use a large language model to write a summary for each passage of the novel. (4) Use the summaries as queries and the novel text as responses to train the model.
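A rough sketch of step (4) above: pairing each passage's summary (query) with the passage text (response) as JSONL training records. The `summarize` function is a stand-in for the large-model summarization in steps (2)-(3); the card does not specify which model or output format was used.

```python
import json

def summarize(passage: str) -> str:
    # Placeholder: the card uses a large model for this step; here we just
    # take the first sentence so the sketch runs end to end.
    return passage.split("。")[0]

def build_pairs(passages, out_path="novel_sft.jsonl"):
    # Write one {"query": summary, "response": passage} record per line.
    with open(out_path, "w", encoding="utf-8") as f:
        for passage in passages:
            record = {"query": summarize(passage), "response": passage}
            f.write(json.dumps(record, ensure_ascii=False) + "\n")
```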
[ 0.10592611134052277, -0.8897490501403809, 0.49335992336273193, 1.0205897092819214, -0.45419538021087646, -0.2021535038948059, -0.08455521613359451, -0.051929980516433716, 0.28193771839141846, 0.49974313378334045, -0.3684998154640198, -0.5903579592704773, -0.5790866613388062, 0.158467665314...
null
null
null
null
null
null
null
null
null
null
null
null
null
philschmid/mt-bench
philschmid
2023-10-27T12:27:18Z
0
1
null
[ "region:us" ]
2023-10-27T12:27:18Z
2023-10-27T12:27:08.000Z
2023-10-27T12:27:08
Entry not found
[ -0.3227647542953491, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965083122253, 0.7915717959403992, 0.07618629932403564, 0.7746022343635559, 0.2563222348690033, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
AudreyYZY/demo
AudreyYZY
2023-10-27T13:31:25Z
0
0
null
[ "region:us" ]
2023-10-27T13:31:25Z
2023-10-27T12:54:55.000Z
2023-10-27T12:54:55
Entry not found
[ -0.3227647542953491, -0.22568407654762268, 0.8622258901596069, 0.4346148371696472, -0.5282984972000122, 0.7012965083122253, 0.7915717959403992, 0.07618629932403564, 0.7746022343635559, 0.2563222348690033, -0.785281777381897, -0.22573848068714142, -0.9104482531547546, 0.5715669393539429, ...
null
null
null
null
null
null
null
null
null
null
null
null
null
makram93/rejected_pairs_base
makram93
2023-10-27T15:03:14Z
0
0
null
[ "region:us" ]
2023-10-27T15:03:14Z
2023-10-27T13:04:22.000Z
2023-10-27T13:04:22
--- dataset_info: features: - name: url dtype: string - name: doc_id dtype: string - name: original_title sequence: string - name: right dtype: string - name: left dtype: string splits: - name: train num_bytes: 88447.0623234648 num_examples: 100 download_size: 0 dataset_size: 88447.0623234648 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "rejected_pairs_base" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.5597395896911621, -0.38346073031425476, 0.1865488588809967, 0.472183495759964, -0.35733017325401306, 0.10772011429071426, 0.21217410266399384, 0.17023241519927979, 0.7932339310646057, 0.5946432948112488, -0.790540874004364, -0.7697962522506714, -0.5360861420631409, -0.09114068001508713,...
null
null
null
null
null
null
null
null
null
null
null
null
null
mponty/web_crawl_docs
mponty
2023-10-27T13:18:47Z
0
0
null
[ "region:us" ]
2023-10-27T13:18:47Z
2023-10-27T13:18:10.000Z
2023-10-27T13:18:10
--- dataset_info: features: - name: text dtype: string - name: lang dtype: string - name: source dtype: string - name: url dtype: string - name: id dtype: string splits: - name: train num_bytes: 1157430740 num_examples: 87370 download_size: 492924255 dataset_size: 1157430740 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "web_crawl_docs" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
[ -0.6027529239654541, -0.12610092759132385, 0.37284764647483826, 0.10937613993883133, -0.21105289459228516, -0.03408947214484215, 0.2258095145225525, 0.06755482405424118, 0.76572585105896, 0.19517964124679565, -0.8539220094680786, -1.0619927644729614, -0.5854493379592896, -0.092741869390010...
null
null
null
null
null
null
null
null
null
null
null
null
null