# Linguistic_Prior / scripts / gen_random_char.py
# (Hugging Face Hub page residue: "gaotiexinqu's picture",
#  "Upload folder using huggingface_hub", commit 7f605ec verified)
import random
import json
import os
def load_vocab_zh(vocab_path="/vol/zhaoy/ds-ocr/scripts/zh_level1_3500.txt"):
    """Load the common-Chinese-character vocabulary from a TSV file.

    Each line is expected to look like "<index>\t<char>"; only the character
    column is kept, and only if it falls in the CJK Unified Ideographs range
    (guards against blank lines or stray non-Chinese characters).

    Args:
        vocab_path: path to the vocabulary file (defaults to the original
            hard-coded location, so existing callers are unaffected).

    Returns:
        A string containing all accepted characters, in file order.
    """
    chars = []
    with open(vocab_path, "r", encoding="utf-8") as f:
        for line in f:
            parts = line.strip().split('\t')
            if len(parts) >= 2:  # need at least an index and a character
                char = parts[1]
                # Keep only genuine CJK ideographs.
                if '\u4e00' <= char <= '\u9fff':
                    chars.append(char)
    # join once instead of repeated string += (which is quadratic)
    return ''.join(chars)
def random_common_chinese(COMMON_CHARS, length):
    """Return `length` characters drawn uniformly (with replacement) from COMMON_CHARS."""
    picks = []
    for _ in range(length):
        picks.append(random.choice(COMMON_CHARS))
    return ''.join(picks)
def generate_dataset(num_samples, save_path):
    """Generate `num_samples` random-Chinese-character samples and save them as JSON.

    Each sample is a dict with an id ("RS001", ...), a placeholder image path
    under images/ (for later image rendering), and a random 2000-character
    string drawn from the level-1 vocabulary.

    Args:
        num_samples: number of samples to generate.
        save_path: destination path for the JSON metadata file.
    """
    # Load the vocabulary ONCE — the original called load_vocab_zh() inside
    # the loop, re-reading the 3500-character file for every sample.
    common_chars = load_vocab_zh()
    length = 2000  # characters per sample
    data = []
    os.makedirs("images", exist_ok=True)
    for i in range(1, num_samples + 1):
        sample_id = f"RS{i:03d}"
        content = random_common_chinese(common_chars, length)
        data.append({
            "id": sample_id,
            "image_path": f"images/{sample_id}.png",  # reserved for image generation later
            "content": content,
        })
    # Persist as UTF-8 JSON (keep CJK characters readable, not \u-escaped).
    with open(save_path, "w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=2)
    print(f"✅ 已生成 {num_samples} 条样本,保存至:{save_path}")
if __name__ == "__main__":
    # Output location for the generated metadata, and how many samples to make.
    output_path = "/vol/zhaoy/ds-ocr/data/rand_zh_2k/meta.json"
    sample_count = 10
    generate_dataset(sample_count, output_path)