import csv
from transformers import AutoTokenizer

# Load the tokenizer that matches the target model so chunk lengths are
# measured in the same tokens the model will see.
tokenizer = AutoTokenizer.from_pretrained("TheBloke/Yarn-Llama-2-7B-128K-GPTQ", use_fast=True)

# Read the raw corpus line by line.
with open('input.txt', 'r') as f:
    data = f.readlines()
# Accumulator state for the chunking loop.
train_data = []
test_data = []
current_row = ""          # text of the chunk currently being built
current_token_count = 0   # running token count for current_row
carry_over = ""           # partial sentence carried into the next chunk
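# Walk the corpus, packing lines into chunks of at most 1024 tokens and
# cutting at sentence boundaries where possible.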
for i, line in enumerate(data):
    line_to_add = line.strip()

    # Count this line's tokens; add_special_tokens=False keeps the count
    # from being inflated by special tokens (e.g. BOS) on every call.
    tokens = tokenizer(line_to_add, add_special_tokens=False)['input_ids']
    num_tokens = len(tokens)
    # If adding this line would push the chunk past 1024 tokens, close out
    # the current chunk first.
    if current_token_count + num_tokens > 1024:
        # Prefer to cut the chunk at its last sentence boundary.
        last_period_idx = current_row.rfind('. ')
        if last_period_idx != -1:
            # Carry the trailing partial sentence over into the next chunk.
            carry_over = current_row[last_period_idx + 2:].strip() + "\n"
            current_row = current_row[:last_period_idx + 1]

        # Roughly the first 90% of the input lines feed the train split,
        # the remainder the test split.
        if i < len(data) * 0.9:
            train_data.append(current_row.strip())
        else:
            test_data.append(current_row.strip())

        # Seed the next chunk with the carried-over fragment, then clear it
        # so the same text cannot be added twice.
        current_row = carry_over
        current_token_count = len(tokenizer(current_row, add_special_tokens=False)['input_ids']) if carry_over else 0
        carry_over = ""

    current_row += line_to_add + "\n"
    current_token_count += num_tokens
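# Flush the final partial chunk; without this, any text after the last
# 1024-token boundary would be silently dropped. It goes to the test split
# since it comes from the tail of the corpus.
if current_row.strip():
    test_data.append(current_row.strip())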
# Write each split as a single-column CSV with a 'Text' header.
with open('train.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['Text'])
    for row in train_data:
        writer.writerow([row])
with open('test.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['Text'])
    for row in test_data:
        writer.writerow([row])
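# The two CSVs can then be loaded for fine-tuning, e.g. with the Hugging Face
# datasets library (assuming it is installed):
#   from datasets import load_dataset
#   ds = load_dataset('csv', data_files={'train': 'train.csv', 'test': 'test.csv'})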