# wikipedia-utterances / transform_wikipedia.py
# Uploaded by jspaulsen via huggingface_hub (revision 82a0609, verified)
import nltk
from nltk.tokenize import sent_tokenize, BlanklineTokenizer
from datasets import load_dataset
import pyarrow as pa
import pyarrow.parquet as pq
import re
from pathlib import Path
# Download the Punkt sentence-tokenizer data used by sent_tokenize below.
# NOTE(review): 'punkt_tab' is presumably the tabular variant newer NLTK
# releases expect — confirm against the installed nltk version.
nltk.download('punkt')
nltk.download('punkt_tab')
def strip_brackets(text: str) -> str:
    """
    Drop parenthesized (), braced {}, and bracketed [] spans — delimiters
    included — then collapse the remaining whitespace to single spaces.
    """
    # Patterns are applied in order; each removes non-overlapping pairs.
    for pattern in (r'\([^)]*\)', r'\{[^}]*\}', r'\[[^\]]*\]'):
        text = re.sub(pattern, '', text)
    return re.sub(r'\s+', ' ', text).strip()
def is_valid_utterance(
    utterance: str,
    minimum_utterance_length: int = 10,
) -> bool:
    """
    Heuristically decide whether a chunk of text is usable as a spoken
    utterance: a single line, at least `minimum_utterance_length` chars,
    ending in sentence-final punctuation (optionally followed by a
    closing quote), with no bracket characters anywhere.
    """
    text = utterance.strip()
    # Reject anything that still spans multiple lines after stripping.
    if '\n' in text:
        return False
    if len(text) < minimum_utterance_length:
        return False
    terminators = ('.', '!', '?')
    if not text.endswith(terminators):
        # Permit a terminator immediately before a closing quote mark.
        if not text.endswith(('"', "'")):
            return False
        if len(text) >= 2 and text[-2] not in terminators:
            return False
    # Any leftover bracket character means upstream stripping failed.
    if any(ch in text for ch in '(){}[]'):
        return False
    return True
def estimate_speech_duration(
    text: str,
    wpm: int = 150,
) -> float:
    """
    Rough spoken-duration estimate for *text*, in seconds, assuming a
    constant speaking rate of `wpm` words per minute (default 150, a
    typical conversational pace). Empty text yields 0.0.
    """
    word_count = len(text.split())
    # Keep the original evaluation order so float results match exactly.
    return (word_count / wpm) * 60
def combine_sentences_in_paragraph(
    sentences: list[str],
    max_duration: float = 30.0,
) -> list[str]:
    """
    Greedily merge consecutive valid sentences of one paragraph into
    chunks whose estimated spoken duration stays within `max_duration`
    seconds. An invalid sentence closes the open chunk and is itself
    discarded; invalid sentences never start a chunk.
    """
    if not sentences:
        return []
    chunks: list[str] = []
    buffer: list[str] = []
    buffer_duration = 0.0
    for raw in sentences:
        usable = is_valid_utterance(raw)
        text = raw.strip()
        if not buffer:
            # Only a valid sentence may open a new chunk.
            if usable:
                buffer.append(text)
                buffer_duration = estimate_speech_duration(text)
            continue
        if not usable:
            # Invalid sentence: finalize the open chunk, drop the sentence.
            chunks.append(' '.join(buffer))
            buffer = []
            continue
        duration = estimate_speech_duration(text)
        if buffer_duration + duration <= max_duration:
            # Fits within the budget — extend the current chunk.
            buffer.append(text)
            buffer_duration += duration
        else:
            # Budget exceeded — emit the chunk and start a fresh one.
            chunks.append(' '.join(buffer))
            buffer = [text]
            buffer_duration = duration
    # Emit whatever remains open at the end of the paragraph.
    if buffer:
        chunks.append(' '.join(buffer))
    return chunks
def transform_text(text: str) -> list[str]:
    """
    Turn raw Wikipedia article text into valid spoken utterances:
    split on blank lines into paragraphs, sentence-tokenize each one,
    merge sentences into duration-bounded chunks, and keep only the
    chunks that still pass the utterance validity check.
    """
    utterances: list[str] = []
    for paragraph in BlanklineTokenizer().tokenize(text):
        chunks = combine_sentences_in_paragraph(sent_tokenize(paragraph))
        utterances.extend(chunk for chunk in chunks if is_valid_utterance(chunk))
    return utterances
def process_example(example: dict) -> list[dict]:
    """
    Expand one Wikipedia example into utterance rows. Each row carries
    the source article's title, the utterance text, and an estimated
    spoken duration in seconds.
    """
    rows: list[dict] = []
    for utterance in transform_text(example['text']):
        rows.append({
            'title': example['title'],
            'text': utterance,
            'duration': estimate_speech_duration(utterance),
        })
    return rows
def flush_batch(rows: list[dict], output_dir: Path, shard_index: int) -> None:
    """Persist one batch of utterance rows as a zero-padded Parquet shard."""
    if not rows:
        return
    # Pivot the row dicts into columnar lists for Arrow.
    columns = ('title', 'text', 'duration')
    table = pa.Table.from_pydict(
        {name: [row[name] for row in rows] for name in columns}
    )
    pq.write_table(table, output_dir / f"shard_{shard_index:05d}.parquet")
    print(f" Wrote shard {shard_index} with {len(rows)} utterances")
def main():
    """
    Stream the English Wikipedia dump, transform each article into
    utterance rows, and write them out as Parquet shards of roughly
    `batch_size` rows each.
    """
    output_dir = Path('wikipedia_utterances')
    output_dir.mkdir(exist_ok=True)
    batch_size = 100_000  # Flush to disk every 100k utterances
    print("Loading Wikipedia dataset...")
    dataset = load_dataset(
        'wikimedia/wikipedia',
        '20231101.en',
        split='train',
        streaming=True,  # avoid materializing the full dump locally
    )
    print("Processing dataset...")
    rows = []
    shard_index = 0  # == number of shards written so far
    total_utterances = 0
    for i, example in enumerate(dataset):
        rows.extend(process_example(example))
        if len(rows) >= batch_size:
            flush_batch(rows, output_dir, shard_index)
            total_utterances += len(rows)
            shard_index += 1
            rows = []
        if (i + 1) % 10_000 == 0:
            print(f"Processed {i + 1} articles, {total_utterances + len(rows)} utterances so far")
    # Flush the partial final shard, if any.
    if rows:
        flush_batch(rows, output_dir, shard_index)
        total_utterances += len(rows)
        # BUG FIX: count the final shard here instead of adding 1 in the
        # summary — the old `shard_index + 1` over-reported by one shard
        # whenever the utterance count was an exact multiple of batch_size.
        shard_index += 1
    print(f"\nDone! Saved {total_utterances} utterances across {shard_index} shards to '{output_dir}/'")
    print(f"Load with: load_dataset('parquet', data_files='{output_dir}/*.parquet')")
if __name__ == "__main__":
main()