| import os |
| import time |
| import random |
| import requests |
| import datasets |
| from tqdm import tqdm |
| from io import BytesIO |
| from PIL import Image |
|
|
# Base URL of the missevan site; all API calls and media downloads go here.
_URL = "https://www.missevan.com"


# Browser-like User-Agent header so the site does not reject our requests
# as coming from an automated client.
_HEADER = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/144.0.0.0 Safari/537.36 Edg/144.0.0.0"
}
|
|
|
|
class insecta_sounds(datasets.GeneratorBasedBuilder):
    """HuggingFace dataset builder for insect sound clips hosted on missevan.com.

    Each example pairs a streaming-audio URL with the episode's cover image
    and the species name, split into a common-name ``label`` and its Latin
    binomial ``latin``.
    """

    def _info(self):
        """Describe the dataset schema, supervised keys, homepage and license."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "audio": datasets.Value("string"),  # streaming URL, not decoded audio
                    "image": datasets.Image(),
                    "label": datasets.Value("string"),  # common (non-Latin) species name
                    "latin": datasets.Value("string"),  # Latin binomial
                }
            ),
            supervised_keys=("audio", "latin"),
            # The dataset page is named after this script file (".py" stripped).
            homepage=f"https://www.modelscope.cn/datasets/Genius-Society/{os.path.basename(__file__)[:-3]}",
            license="CC-BY-NC-ND",
            version="0.0.1",
        )

    def _get_files(self, drama_id=73247, page_size=100, max_retries=10):
        """Fetch the episode list for *drama_id* from the missevan drama API.

        Retries with a short random back-off on any network / HTTP / JSON
        error, giving up after *max_retries* failed attempts.

        Args:
            drama_id: numeric id of the drama (album) to list.
            page_size: number of episodes to request in one page.
            max_retries: remaining retry budget before giving up.

        Returns:
            The list of raw episode records from the API response.

        Raises:
            RuntimeError: when every retry attempt has failed.
        """
        try:
            response = requests.get(
                f"{_URL}/dramaapi/getdramaepisodedetails",
                params={"drama_id": drama_id, "p": 1, "page_size": page_size},
                headers=_HEADER,
                timeout=30,  # never hang forever on a stalled connection
            )
            response.raise_for_status()
            return response.json()["info"]["Datas"]

        except Exception as e:
            # BUG FIX: the original retried with self._get_files() and thus
            # silently dropped a caller-supplied drama_id/page_size, and it
            # recursed without bound. Propagate the arguments and cap retries.
            if max_retries <= 0:
                raise RuntimeError(f"Failed to fetch episode list: {e}") from e
            print(f"{e}, retrying...")
            time.sleep(random.randint(3, 5))
            return self._get_files(drama_id, page_size, max_retries - 1)

    def _dld_img(self, url: str, max_retries=10):
        """Download *url* and decode it as a PIL image, retrying on failure.

        Args:
            url: direct URL of the cover image.
            max_retries: remaining retry budget before giving up.

        Returns:
            The decoded ``PIL.Image.Image``.

        Raises:
            RuntimeError: when every retry attempt has failed.
        """
        try:
            response = requests.get(url, headers=_HEADER, timeout=30)
            response.raise_for_status()
            return Image.open(BytesIO(response.content))

        except Exception as e:
            if max_retries <= 0:
                raise RuntimeError(f"Failed to download image {url}: {e}") from e
            print(f"{e}, retrying...")
            # Back off briefly; the original retried immediately, hammering
            # the server, and could recurse without bound.
            time.sleep(random.randint(3, 5))
            return self._dld_img(url, max_retries - 1)

    def _split_generators(self, _):
        """Build the full example list and expose it as a single train split."""
        dataset = []
        files = self._get_files()
        for file in tqdm(files, desc="Parsing classes"):
            # "soundstr" holds "<label> <latin binomial>". partition() behaves
            # like split(" ", 1) when a space is present but tolerates entries
            # without one (latin then stays empty) instead of raising
            # ValueError on tuple unpacking.
            label, _sep, latin = str(file["soundstr"]).partition(" ")
            dataset.append(
                {
                    "audio": f"{_URL}/soundiframe/{file['id']}?type=small",
                    "image": self._dld_img(file["front_cover"]),
                    "label": label.strip(),
                    "latin": latin.strip(),
                }
            )

        random.shuffle(dataset)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": dataset},
            )
        ]

    def _generate_examples(self, files):
        """Yield (index, example) pairs as required by GeneratorBasedBuilder."""
        for i, example in enumerate(files):
            yield i, example
|
|