Datasets:

Modalities:
Text
Formats:
json
Libraries:
Datasets
pandas
Counterfact / croissant.json
PerAsperaAd's picture
Upload croissant.json
a45cd48 verified
{"@context":{"@language":"en","@vocab":"https://schema.org/","arrayShape":"cr:arrayShape","citeAs":"cr:citeAs","column":"cr:column","conformsTo":"dct:conformsTo","cr":"http://mlcommons.org/croissant/","data":{"@id":"cr:data","@type":"@json"},"dataBiases":"cr:dataBiases","dataCollection":"cr:dataCollection","dataType":{"@id":"cr:dataType","@type":"@vocab"},"dct":"http://purl.org/dc/terms/","extract":"cr:extract","field":"cr:field","fileProperty":"cr:fileProperty","fileObject":"cr:fileObject","fileSet":"cr:fileSet","format":"cr:format","includes":"cr:includes","isArray":"cr:isArray","isLiveDataset":"cr:isLiveDataset","jsonPath":"cr:jsonPath","key":"cr:key","md5":"cr:md5","parentField":"cr:parentField","path":"cr:path","personalSensitiveInformation":"cr:personalSensitiveInformation","recordSet":"cr:recordSet","references":"cr:references","regex":"cr:regex","repeated":"cr:repeated","replace":"cr:replace","sc":"https://schema.org/","separator":"cr:separator","source":"cr:source","subField":"cr:subField","transform":"cr:transform"},"@type":"sc:Dataset","distribution":[{"@type":"cr:FileObject","@id":"repo","name":"repo","description":"The Hugging Face git repository.","contentUrl":"https://huggingface.co/datasets/DataAttributionEval/Counterfact/tree/refs%2Fconvert%2Fparquet","encodingFormat":"git+https","sha256":"https://github.com/mlcommons/croissant/issues/80"},{"@type":"cr:FileSet","@id":"parquet-files-for-config-Llama-3.1-8B","containedIn":{"@id":"repo"},"encodingFormat":"application/x-parquet","includes":"Llama-3.1-8B/*/*.parquet"},{"@type":"cr:FileSet","@id":"parquet-files-for-config-Llama-3.2-1B","containedIn":{"@id":"repo"},"encodingFormat":"application/x-parquet","includes":"Llama-3.2-1B/*/*.parquet"},{"@type":"cr:FileSet","@id":"parquet-files-for-config-Pythia-1b","containedIn":{"@id":"repo"},"encodingFormat":"application/x-parquet","includes":"Pythia-1b/*/*.parquet"}],"recordSet":[{"@type":"cr:RecordSet","dataType":"cr:Split","key":{"@id":"Llama-3.1-8B_splits/split_name"},"@id":"Llama-3.1-8B_splits","name":"Llama-3.1-8B_splits","description":"Splits for the Llama-3.1-8B config.","field":[{"@type":"cr:Field","@id":"Llama-3.1-8B_splits/split_name","dataType":"sc:Text"}],"data":[{"Llama-3.1-8B_splits/split_name":"train"},{"Llama-3.1-8B_splits/split_name":"ref"}]},{"@type":"cr:RecordSet","@id":"Llama-3.1-8B","description":"DataAttributionEval/Counterfact - 'Llama-3.1-8B' subset\n\nAdditional information:\n- 2 splits: train, ref","field":[{"@type":"cr:Field","@id":"Llama-3.1-8B/split","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Llama-3.1-8B"},"extract":{"fileProperty":"fullpath"},"transform":{"regex":"Llama\\-3\\.1\\-8B/(?:partial-)?(train|ref)/.+parquet$"}},"references":{"field":{"@id":"Llama-3.1-8B_splits/split_name"}}},{"@type":"cr:Field","@id":"Llama-3.1-8B/prompt","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Llama-3.1-8B"},"extract":{"column":"prompt"}}},{"@type":"cr:Field","@id":"Llama-3.1-8B/response","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Llama-3.1-8B"},"extract":{"column":"response"}}},{"@type":"cr:Field","@id":"Llama-3.1-8B/true_entity","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Llama-3.1-8B"},"extract":{"column":"true_entity"}}},{"@type":"cr:Field","@id":"Llama-3.1-8B/counterfactual_entity","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Llama-3.1-8B"},"extract":{"column":"counterfactual_entity"}}},{"@type":"cr:Field","@id":"Llama-3.1-8B/type","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Llama-3.1-8B"},"extract":{"column":"type"}}},{"@type":"cr:Field","@id":"Llama-3.1-8B/id","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Llama-3.1-8B"},"extract":{"column":"id"}}}]},{"@type":"cr:RecordSet","dataType":"cr:Split","key":{"@id":"Llama-3.2-1B_splits/split_name"},"@id":"Llama-3.2-1B_splits","name":"Llama-3.2-1B_splits","description":"Splits for the Llama-3.2-1B config.","field":[{"@type":"cr:Field","@id":"Llama-3.2-1B_splits/split_name","dataType":"sc:Text"}],"data":[{"Llama-3.2-1B_splits/split_name":"train"},{"Llama-3.2-1B_splits/split_name":"ref"}]},{"@type":"cr:RecordSet","@id":"Llama-3.2-1B","description":"DataAttributionEval/Counterfact - 'Llama-3.2-1B' subset\n\nAdditional information:\n- 2 splits: train, ref","field":[{"@type":"cr:Field","@id":"Llama-3.2-1B/split","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Llama-3.2-1B"},"extract":{"fileProperty":"fullpath"},"transform":{"regex":"Llama\\-3\\.2\\-1B/(?:partial-)?(train|ref)/.+parquet$"}},"references":{"field":{"@id":"Llama-3.2-1B_splits/split_name"}}},{"@type":"cr:Field","@id":"Llama-3.2-1B/prompt","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Llama-3.2-1B"},"extract":{"column":"prompt"}}},{"@type":"cr:Field","@id":"Llama-3.2-1B/response","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Llama-3.2-1B"},"extract":{"column":"response"}}},{"@type":"cr:Field","@id":"Llama-3.2-1B/true_entity","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Llama-3.2-1B"},"extract":{"column":"true_entity"}}},{"@type":"cr:Field","@id":"Llama-3.2-1B/counterfactual_entity","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Llama-3.2-1B"},"extract":{"column":"counterfactual_entity"}}},{"@type":"cr:Field","@id":"Llama-3.2-1B/type","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Llama-3.2-1B"},"extract":{"column":"type"}}},{"@type":"cr:Field","@id":"Llama-3.2-1B/id","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Llama-3.2-1B"},"extract":{"column":"id"}}}]},{"@type":"cr:RecordSet","dataType":"cr:Split","key":{"@id":"Pythia-1b_splits/split_name"},"@id":"Pythia-1b_splits","name":"Pythia-1b_splits","description":"Splits for the Pythia-1b config.","field":[{"@type":"cr:Field","@id":"Pythia-1b_splits/split_name","dataType":"sc:Text"}],"data":[{"Pythia-1b_splits/split_name":"train"},{"Pythia-1b_splits/split_name":"ref"}]},{"@type":"cr:RecordSet","@id":"Pythia-1b","description":"DataAttributionEval/Counterfact - 'Pythia-1b' subset\n\nAdditional information:\n- 2 splits: train, ref","field":[{"@type":"cr:Field","@id":"Pythia-1b/split","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Pythia-1b"},"extract":{"fileProperty":"fullpath"},"transform":{"regex":"Pythia\\-1b/(?:partial-)?(train|ref)/.+parquet$"}},"references":{"field":{"@id":"Pythia-1b_splits/split_name"}}},{"@type":"cr:Field","@id":"Pythia-1b/prompt","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Pythia-1b"},"extract":{"column":"prompt"}}},{"@type":"cr:Field","@id":"Pythia-1b/response","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Pythia-1b"},"extract":{"column":"response"}}},{"@type":"cr:Field","@id":"Pythia-1b/true_entity","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Pythia-1b"},"extract":{"column":"true_entity"}}},{"@type":"cr:Field","@id":"Pythia-1b/counterfactual_entity","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Pythia-1b"},"extract":{"column":"counterfactual_entity"}}},{"@type":"cr:Field","@id":"Pythia-1b/type","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Pythia-1b"},"extract":{"column":"type"}}},{"@type":"cr:Field","@id":"Pythia-1b/id","dataType":"sc:Text","source":{"fileSet":{"@id":"parquet-files-for-config-Pythia-1b"},"extract":{"column":"id"}}}]}],"conformsTo":"http://mlcommons.org/croissant/1.1","name":"Counterfact","description":"\n\t\n\t\t\n\t\tOverview\n\t\n\nThis dataset is designed to evaluate data attribution methods for factual tracing. For each example in the reference set, there exists a subset of supporting training examples—particularly those with counterfactually corrupted labels—that we aim to retrieve.\n\n\t\n\t\t\n\t\n\t\n\t\tImportantly, all models are fine-tuned on the same training set, but each model has its own reference set, which captures the specific instances that expose counterfactual behavior during evaluation.… See the full description on the dataset page: https://huggingface.co/datasets/DataAttributionEval/Counterfact.","alternateName":["DataAttributionEval/Counterfact"],"creator":{"@type":"Organization","name":"DATE-LM (Data Attribution Evaluation in Language Models)","url":"https://huggingface.co/DataAttributionEval"},"keywords":["10K - 100K","json","Text","Datasets","pandas","Croissant","Polars","🇺🇸 Region: US"],"url":"https://huggingface.co/datasets/DataAttributionEval/Counterfact"}