aglazkova/bart_finetuned_keyphrase_extraction
Updated
• 3 • 14
id int64 52.8k 1.04M | document sequence | doc_bio_tags sequence |
|---|---|---|
502,567 | [
"--",
"T",
"Detecting",
"graph-based",
"spatial",
"outliers",
".",
"--",
"A",
"of",
"outliers",
"can",
"lead",
"to",
"the",
"discovery",
"of",
"unexpected",
",",
"interesting",
",",
"and",
"useful",
"knowledge",
".",
"Existing",
"methods",
"are",
"designed",
... | [
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"... |
506,154 | ["--","T","Task","assignment","with","unknown","duration",".","--","A","We","consider","a","distribu(...TRUNCATED) | ["O","O","B","I","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O"(...TRUNCATED) |
504,212 | ["--","T","Analysis","and","comparison","of","two","general","sparse","solvers","for","distributed",(...TRUNCATED) | ["O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O"(...TRUNCATED) |
502,094 | ["--","T","Efficient","generation","of","shared","RSA","keys",".","--","A","We","describe","efficien(...TRUNCATED) | ["O","O","O","O","O","O","B","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","B"(...TRUNCATED) |
507,259 | ["--","T","Axioms","for","real-time","logics",".","--","A","This","paper","presents","a","complete",(...TRUNCATED) | ["O","O","O","O","O","O","O","O","O","O","O","O","O","O","B","O","O","O","O","O","O","O","O","O","O"(...TRUNCATED) |
384,249 | ["--","T","Fast","priority","queues","for","cached","memory",".","--","A","The","cache","hierarchy",(...TRUNCATED) | ["O","O","O","O","O","O","O","O","O","O","O","O","B","O","O","O","O","O","O","O","O","O","O","O","O"(...TRUNCATED) |
507,059 | ["--","T","The","Impulse","Memory","Controller",".","--","A","AbstractImpulse","is","a","memory","sy(...TRUNCATED) | ["O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O","O"(...TRUNCATED) |
500,486 | ["--","T","A","Survey","of","Energy","Efficient","Network","Protocols","for","Wireless","Networks","(...TRUNCATED) | ["O","O","O","O","O","O","O","B","I","O","B","I","O","O","O","O","O","O","O","O","O","O","O","O","O"(...TRUNCATED) |
506,906 | ["--","T","Negotiation-based","protocols","for","disseminating","information","in","wireless","senso(...TRUNCATED) | ["O","O","O","O","O","O","O","O","B","I","I","O","O","O","O","O","O","O","O","O","O","O","O","O","O"(...TRUNCATED) |
501,419 | ["--","T","Neighborhood","aware","source","routing",".","--","A","A","novel","approach","to","source(...TRUNCATED) | ["O","O","O","O","B","I","O","O","O","O","O","O","O","B","I","O","B","I","I","O","O","O","O","O","O"(...TRUNCATED) |
YAML Metadata Warning: empty or missing yaml metadata in repo card
Check out the documentation for more information.
A dataset for benchmarking keyphrase extraction and generation techniques on long-document English scientific papers. For more details about the dataset, please refer to the original paper - https://www.semanticscholar.org/paper/Large-Dataset-for-Keyphrases-Extraction-Krapivin-Autaeu/2c56421ff3c2a69894d28b09a656b7157df8eb83 Original source of the data -
| Split | #datapoints |
|---|---|
| Test | 2305 |
from datasets import load_dataset

# Load the full "raw" configuration of the midas/krapivin dataset.
# NOTE: this downloads the data from the Hugging Face Hub on first use.
dataset = load_dataset("midas/krapivin", "raw")

# Inspect the first record of the test split (the only split in this dataset).
print("Sample from test dataset split")
test_sample = dataset["test"][0]
# list(d.keys()) is the idiomatic form; a comprehension over .keys() is redundant.
print("Fields in the sample: ", list(test_sample.keys()))
print("Tokenized Document: ", test_sample["document"])
print("Document BIO Tags: ", test_sample["doc_bio_tags"])
print("Extractive/present Keyphrases: ", test_sample["extractive_keyphrases"])
print("Abstractive/absent Keyphrases: ", test_sample["abstractive_keyphrases"])
print("\n-----------\n")
Output
from datasets import load_dataset

# Load the "extraction" configuration: documents plus token-level BIO tags
# for keyphrase extraction. Downloads from the Hugging Face Hub on first use.
dataset = load_dataset("midas/krapivin", "extraction")
print("Samples for Keyphrase Extraction")

# Inspect the first record of the test split.
print("Sample from test data split")
test_sample = dataset["test"][0]
# list(d.keys()) is the idiomatic form; a comprehension over .keys() is redundant.
print("Fields in the sample: ", list(test_sample.keys()))
print("Tokenized Document: ", test_sample["document"])
print("Document BIO Tags: ", test_sample["doc_bio_tags"])
print("\n-----------\n")

# Load the "generation" configuration: documents plus present (extractive)
# and absent (abstractive) gold keyphrases for keyphrase generation.
dataset = load_dataset("midas/krapivin", "generation")
print("Samples for Keyphrase Generation")

# Inspect the first record of the test split.
print("Sample from test data split")
test_sample = dataset["test"][0]
print("Fields in the sample: ", list(test_sample.keys()))
print("Tokenized Document: ", test_sample["document"])
print("Extractive/present Keyphrases: ", test_sample["extractive_keyphrases"])
print("Abstractive/absent Keyphrases: ", test_sample["abstractive_keyphrases"])
print("\n-----------\n")
@inproceedings{Krapivin2009LargeDF,
title={Large Dataset for Keyphrases Extraction},
author={Mikalai Krapivin and Aliaksandr Autaeu and Maurizio Marchese},
year={2009}
}
Thanks to @debanjanbhucs, @dibyaaaaax and @ad6398 for adding this dataset