shp / README.md
transZ's picture
Update README.md
8c5db3e verified
metadata
dataset_info:
  features:
    - name: rejected
      list:
        - name: content
          dtype: string
        - name: role
          dtype: string
    - name: chosen
      list:
        - name: content
          dtype: string
        - name: role
          dtype: string
    - name: rejected_score
      dtype: int64
    - name: chosen_score
      dtype: int64
  splits:
    - name: validation
      num_bytes: 14343514
      num_examples: 5132
    - name: test
      num_bytes: 14934583
      num_examples: 5152
  download_size: 8143968
  dataset_size: 29278097
configs:
  - config_name: default
    data_files:
      - split: validation
        path: data/validation-*
      - split: test
        path: data/test-*
license: mit
task_categories:
  - text-classification
language:
  - en
size_categories:
  - 10K<n<100K

This dataset contains the test split and the validation split from the original stanfordnlp/SHP dataset. I have adapted it to match the version used in RLHFlow.

Below is the code you can use to recreate this dataset (set `split` to `'test'` or `'validation'`).

# Load one split of the Stanford SHP preference dataset from the Hub.
from datasets import load_dataset

# Change to 'validation' to regenerate the other split.
split = 'test'
ds = load_dataset('stanfordnlp/SHP', split=split)

def filter_example(example):
    """Keep only examples with a clear preference margin.

    Parameters:
        example: dict with keys 'labels' (1 means response A is preferred,
            0 means response B is preferred), 'score_A', and 'score_B'.

    Returns:
        True if the preferred response's score is more than twice the
        other response's score, False otherwise.
    """
    if example['labels'] == 0:
        # B is preferred: chosen score (B) over rejected score (A).
        ratio = example['score_B'] * 1.0 / example['score_A']
    elif example['labels'] == 1:
        # A is preferred: chosen score (A) over rejected score (B).
        ratio = example['score_A'] * 1.0 / example['score_B']
    else:
        # The original code left `ratio` unbound here, which would raise
        # a NameError; treat unexpected labels as filtered out instead.
        return False
    return ratio > 2.0

# Drop examples whose preference margin is too small (score ratio <= 2).
ds_filtered = ds.filter(filter_example)

####
import itertools  # NOTE(review): imported but unused below
from collections import defaultdict
import json
# Maps prompt -> list of preference-pair records built from that prompt.
data_ret = defaultdict(list)

# Flat list of records that will be written to the output file.
data = []

# Convert each filtered example into a chosen/rejected message pair,
# grouped by prompt so the number of pairs per prompt can be capped later.
for example in ds_filtered:
    prompt = example['history']

    if example['score_A'] > example['score_B']:
        # A has the higher score, so the label must mark A as preferred.
        assert example['labels'] == 1

        chosen_message = [
            {"content": prompt, "role": "user"},
            {"content": example['human_ref_A'], "role": "assistant"},
        ]
        rejected_message = [
            {"content": prompt, "role": "user"},
            {"content": example['human_ref_B'], "role": "assistant"},
        ]
        chosen_rating = example['score_A']
        rejected_rating = example['score_B']

    elif example['score_A'] < example['score_B']:
        # B has the higher score, so the label must mark B as preferred.
        assert example['labels'] == 0

        chosen_message = [
            {"content": prompt, "role": "user"},
            {"content": example['human_ref_B'], "role": "assistant"},
        ]
        rejected_message = [
            {"content": prompt, "role": "user"},
            {"content": example['human_ref_A'], "role": "assistant"},
        ]
        chosen_rating = example['score_B']
        rejected_rating = example['score_A']
    else:
        # Tied scores: the original code only printed "error" and fell
        # through, appending stale variables from the previous iteration
        # (or raising NameError on the first one). Skip the example.
        print("error")
        continue

    data_ret[prompt].append({
        "rejected": rejected_message,
        "chosen": chosen_message,
        "rejected_score": rejected_rating,
        "chosen_score": chosen_rating,
    })

print(len(data_ret))
                            
    
# Keep at most 5 preference pairs per prompt, then flatten into one list.
# (Removed the unused `num_responses` local from the original.)
for key in data_ret:
    data.extend(data_ret[key][:5])

print(len(data))

# Write one JSON record per line (UTF-8, non-ASCII characters preserved).
with open(f'shp_{split}.jsonl', 'w+', encoding='utf-8') as fout:
    for d in data:
        fout.write(json.dumps(d, ensure_ascii=False) + "\n")