{"repo_id":"MetaMathQA","entity_id":"py:run","uri":"program://MetaMathQA/module/run#L1-L473","kind":"module","name":"run","path":"run.py","language":"python","start_line":1,"end_line":473,"context_start_line":1,"context_end_line":473,"code":"# Copyright 2025-present the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nMain entry point to run the experiments. Contains general setup and the proper training code.\n\"\"\"\n\nimport argparse\nimport datetime as dt\nimport gc\nimport json\nimport os\nimport random\nimport sys\nimport textwrap\nimport time\nfrom contextlib import nullcontext\nfrom functools import partial\nfrom typing import Any, Callable, Literal, Optional\n\nimport torch\nfrom data import get_train_valid_test_datasets\nfrom torch import nn\nfrom torch.amp import GradScaler, autocast\nfrom tqdm import tqdm\nfrom transformers import GenerationConfig, set_seed\nfrom utils import (\n FILE_NAME_TRAIN_PARAMS,\n BucketIterator,\n TrainResult,\n TrainStatus,\n get_accuracy,\n get_base_model_info,\n get_dataset_info,\n get_file_size,\n get_model,\n get_optimizer_and_scheduler,\n get_peft_branch,\n get_tokenizer,\n get_train_config,\n init_accelerator,\n log_results,\n validate_experiment_path,\n)\n\nfrom peft import AdaLoraConfig, PeftConfig\nfrom peft.utils import CONFIG_NAME, infer_device\n\n\n# # suppress all warnings\n# warnings.filterwarnings(\"ignore\") # FIXME?\n\ndtype_to_bytes_linear = {\"float32\": 4, \"float16\": 2, \"bfloat16\": 2, \"int8\": 1, \"int4\": 0.5}\n# if lr scheduler with warmup is used, the ratio of warmup steps to total steps\nBUCKET_FACTOR = 20 # number of batches per bucket, increasing this further has diminishing returns\n\n\ndef get_generation_config(*, seq_len, generate_kwargs) -> GenerationConfig:\n # filter out None values so that we don't depend on setting correct defaults in the config\n generation_kwargs = {k: v for k, v in generate_kwargs.items() if v is not None}\n if (\"max_length\" in generation_kwargs) and (\"max_new_tokens\" in generation_kwargs):\n # transformers does not support setting both max_length and max_new_tokens, but what we want in this case is to\n # take the smaller of the two values\n new_max_length = min(generation_kwargs[\"max_new_tokens\"] + seq_len, generation_kwargs[\"max_length\"])\n del generation_kwargs[\"max_new_tokens\"]\n generation_kwargs[\"max_length\"] = new_max_length\n generation_config = GenerationConfig(**generate_kwargs)\n return generation_config\n\n\ndef evaluate(model, tokenizer, ds, batch_size, generate_kwargs, use_tqdm: bool = False) -> tuple[list[str], list[str]]:\n with torch.inference_mode():\n predictions = []\n responses = []\n pbar = range(0, len(ds), batch_size)\n if use_tqdm:\n pbar = tqdm(pbar)\n for j in pbar:\n sliced = ds[j : j + batch_size]\n responses += sliced.pop(\"response\")\n batch = tokenizer.pad(sliced, return_tensors=\"pt\", padding_side=\"left\").to(model.device)\n seq_len = batch[\"input_ids\"].shape[1]\n generation_config = get_generation_config(seq_len=seq_len, generate_kwargs=generate_kwargs)\n outputs = model.generate(**batch, generation_config=generation_config, pad_token_id=tokenizer.eos_token_id)\n predictions += tokenizer.batch_decode(outputs, skip_special_tokens=True)\n return predictions, responses\n\n\nclass DummyGradScaler:\n # if no mixed precision is being used\n def scale(self, loss):\n return loss\n\n def unscale_(self, optimizer):\n pass\n\n def step(self, optimizer):\n optimizer.step()\n\n def update(self):\n pass\n\n\ndef train(\n *,\n model: nn.Module,\n max_steps: int,\n batch_size: int,\n batch_size_eval: int,\n tokenizer: Any,\n accelerator_memory_init: int,\n eval_steps: int,\n generation_kwargs: dict[str, Any],\n grad_norm_clip: float,\n optimizer_type: str,\n optimizer_kwargs: dict[str, Any],\n query_template: str,\n lr_scheduler_arg: Optional[Literal[\"cosine\"]],\n use_amp: bool,\n is_adalora: bool,\n) -> TrainResult:\n accelerator_memory_allocated_log = []\n accelerator_memory_reserved_log = []\n losses = []\n durations = []\n metrics = []\n sample = 0 # keep count of the current sample\n total_samples = 0 # total number of samples over all epochs\n total_tokens = [] # total number of tokens over all epochs\n\n device_type = infer_device()\n torch_accelerator_module = getattr(torch, device_type, torch.cuda)\n if use_amp:\n grad_scaler: GradScaler | DummyGradScaler = GradScaler(device=device_type)\n autocast_ctx: Callable[[], ContextManager[Any]] = partial(autocast, device_type=device_type)\n else:\n grad_scaler = DummyGradScaler()\n autocast_ctx = nullcontext\n\n optimizer, lr_scheduler = get_optimizer_and_scheduler(\n model,\n optimizer_type=optimizer_type,\n max_steps=max_steps,\n lr_scheduler_arg=lr_scheduler_arg,\n **optimizer_kwargs,\n )\n # print this after getting the optimizer, in case it modifies requires_gard\n if hasattr(model, \"get_nb_trainable_parameters\"):\n num_trainable_params, num_params = model.get_nb_trainable_parameters()\n else:\n num_params = model.num_parameters()\n num_trainable_params = num_params\n print_verbose(\n f\"trainable params: {num_trainable_params:,d} || all params: {num_params:,d} || \"\n f\"trainable: {100 * num_trainable_params / num_params:.4f}%\"\n )\n\n status = TrainStatus.FAILED\n tic_train = time.perf_counter()\n eval_time = 0.0\n error_msg = \"\"\n\n ds_train, ds_valid, ds_test = get_train_valid_test_datasets(\n tokenizer=tokenizer, query_template=query_template, print_fn=print_verbose\n )\n # note: bucketing by length is only really worth it for the train dataset, since it's length is big compared to the\n # batch size\n iterator_train = BucketIterator(\n ds_train,\n batch_size=batch_size,\n bucket_factor=BUCKET_FACTOR,\n delete_cols=[\"response\"],\n )\n try:\n pbar = tqdm(range(1, max_steps + 1))\n for step, batch in zip(pbar, iterator_train):\n tic = time.perf_counter()\n\n # create the batch\n tokens_per_sample = [len(i) for i in batch[\"input_ids\"]]\n total_tokens.append(sum(tokens_per_sample) + len(tokens_per_sample)) # add EOS token\n batch = tokenizer.pad(batch, return_tensors=\"pt\").to(model.device)\n actual_batch_size = len(batch[\"input_ids\"])\n total_samples += actual_batch_size\n sample += batch_size\n if sample >= len(ds_train): # new epoch\n sample = 0\n\n # add labels, they are automatically shifted by transformers\n labels = batch[\"input_ids\"].clone()\n # We want to ignore the padding tokens except for the first EOS token; if we don't ignore them, the loss\n # will be dominated by padding tokens; if we ignore all, the model will not learn to predict the EOS token.\n # TODO: Note that the longest sequence in the batch won't have any PAD/EOS token at the end, this is fine if\n # the batch size is > 1 but should still be fixed eventually.\n for i, num_tokens in enumerate(tokens_per_sample):\n labels[i, num_tokens + 1 :] = -100\n batch[\"labels\"] = labels\n num_items_in_batch = batch[\"attention_mask\"].sum().item()\n\n # train step\n optimizer.zero_grad()\n with autocast_ctx():\n outputs = model(**batch, num_items_in_batch=num_items_in_batch)\n loss = outputs.loss\n grad_scaler.scale(loss).backward()\n if grad_norm_clip:\n grad_scaler.unscale_(optimizer)\n torch.nn.utils.clip_grad_norm_(model.parameters(), grad_norm_clip)\n grad_scaler.step(optimizer)\n grad_scaler.update()\n lr_scheduler.step()\n\n if is_adalora:\n model.base_model.update_and_allocate(step)\n\n losses.append(loss.item())\n pbar.set_postfix({\"loss\": loss.item()})\n accelerator_memory_allocated_log.append(\n torch_accelerator_module.memory_allocated() - accelerator_memory_init\n )\n accelerator_memory_reserved_log.append(\n torch_accelerator_module.memory_reserved() - accelerator_memory_init\n )\n toc = time.perf_counter()\n durations.append(toc - tic)\n\n # every couple of steps, evaluate; this can be slow due to generation\n if step % eval_steps == 0:\n tic_eval = time.perf_counter()\n loss_avg = sum(losses[-eval_steps:]) / eval_steps\n memory_allocated_avg = sum(accelerator_memory_allocated_log[-eval_steps:]) / eval_steps\n memory_reserved_avg = sum(accelerator_memory_reserved_log[-eval_steps:]) / eval_steps\n token_sum = sum(total_tokens[-eval_steps:])\n dur_train = sum(durations[-eval_steps:])\n tokens_per_sec = token_sum / dur_train\n\n model.eval()\n predictions, responses = evaluate(\n model=model,\n tokenizer=tokenizer,\n ds=ds_valid,\n batch_size=batch_size_eval,\n generate_kwargs={**generation_kwargs},\n )\n model.train()\n\n example = random.choice(predictions)\n example = textwrap.shorten(example, width=750)\n example = textwrap.indent(example, \" \")\n print_verbose(f\"\\nExample prediction:\\n{example}\\n\")\n accuracy = get_accuracy(predictions=predictions, responses=responses)\n num_tokens_generated = sum(sum(mask) for mask in tokenizer(predictions)[\"attention_mask\"])\n\n toc_eval = time.perf_counter()\n dur_eval = toc_eval - tic_eval\n eval_time += toc_eval - tic_eval\n elapsed = time.perf_counter() - tic_train\n\n metrics.append(\n {\n \"step\": step,\n \"valid accuracy\": accuracy,\n \"train loss\": loss_avg,\n \"train samples\": total_samples,\n \"train time\": dur_train,\n \"eval time\": dur_eval,\n \"tokens / sec\": tokens_per_sec,\n \"mem allocated avg\": memory_allocated_avg,\n \"mem reserved avg\": memory_reserved_avg,\n \"elapsed time\": elapsed,\n }\n )\n\n log_dict = {\n \"step\": f\"{step:5d}\",\n \"samples\": f\"{total_samples:7d}\",\n \"lr\": f\"{lr_scheduler.get_last_lr()[0]:.2e}\",\n \"loss avg\": f\"{loss_avg:.4f}\",\n \"valid acc\": f\"{accuracy:.3f}\",\n \"gen valid tokens\": num_tokens_generated,\n \"train time\": f\"{dur_train:.1f}s\",\n \"eval time\": f\"{dur_eval:.1f}s\",\n \"train tokens / sec\": f\"{tokens_per_sec:.0f}\",\n \"mem allocated\": f\"{memory_allocated_avg:.0f}\",\n \"mem reserved\": f\"{memory_reserved_avg:.0f}\",\n \"elapsed time\": f\"{elapsed // 60:.0f}min {elapsed % 60:.0f}s\",\n }\n print_verbose(json.dumps(log_dict))\n\n # # TODO is this needed?\n torch_accelerator_module.empty_cache()\n gc.collect()\n\n print_verbose(f\"Training finished after {max_steps} steps, evaluation on test set follows.\")\n # test set evaluation\n model.eval()\n predictions, responses = evaluate(\n model=model,\n tokenizer=tokenizer,\n ds=ds_test,\n batch_size=batch_size_eval,\n generate_kwargs={**generation_kwargs, \"pad_token_id\": tokenizer.eos_token_id},\n use_tqdm=len(ds_test) > 100,\n )\n accuracy = get_accuracy(predictions=predictions, responses=responses)\n metrics.append(\n {\n \"step\": step,\n \"test accuracy\": accuracy,\n \"train loss\": sum(losses[-eval_steps:]) / eval_steps,\n \"train samples\": total_samples,\n \"train total tokens\": sum(total_tokens),\n }\n )\n print_verbose(f\"Test accuracy: {accuracy:.3f}\")\n\n except KeyboardInterrupt:\n print_verbose(\"canceled training\")\n status = TrainStatus.CANCELED\n error_msg = \"manually canceled\"\n except torch.OutOfMemoryError as exc:\n # ouch, still let's try to log some results\n print_verbose(\"out of memory error encountered\")\n status = TrainStatus.CANCELED\n error_msg = str(exc)\n except Exception as exc:\n print_verbose(f\"encountered an error: {exc}\")\n status = TrainStatus.CANCELED\n error_msg = str(exc)\n\n toc_train = time.perf_counter()\n train_time = toc_train - tic_train - eval_time\n\n if status != TrainStatus.CANCELED:\n status = TrainStatus.SUCCESS\n train_result = TrainResult(\n status=status,\n train_time=train_time,\n accelerator_memory_reserved_log=accelerator_memory_reserved_log,\n losses=losses,\n metrics=metrics,\n error_msg=error_msg,\n num_trainable_params=num_trainable_params,\n num_total_params=num_params,\n )\n return train_result\n\n\ndef main(*, path_experiment: str, experiment_name: str, clean: bool) -> None:\n tic_total = time.perf_counter()\n start_date = dt.datetime.now(tz=dt.timezone.utc).replace(microsecond=0).isoformat()\n\n peft_branch = get_peft_branch()\n if peft_branch == \"main\":\n print_verbose(\"===== This experiment is categorized as a MAIN run because the PEFT branch is 'main' ======\")\n else:\n print_verbose(\n f\"===== This experiment is categorized as a TEST run because the PEFT branch is '{peft_branch}' ======\"\n )\n\n # load configs\n peft_config: Optional[PeftConfig] = None\n if os.path.exists(os.path.join(path_experiment, CONFIG_NAME)):\n peft_config = PeftConfig.from_pretrained(path_experiment)\n else:\n print_verbose(f\"Could not find PEFT config at {path_experiment}, performing FULL FINETUNING\")\n path_train_config = os.path.join(path_experiment, FILE_NAME_TRAIN_PARAMS)\n train_config = get_train_config(path_train_config)\n set_seed(train_config.seed)\n\n # initialize objects\n accelerator_memory_init = init_accelerator()\n tokenizer = get_tokenizer(model_id=train_config.model_id, max_seq_length=train_config.max_seq_length)\n\n model_info = get_base_model_info(train_config.model_id)\n metamath_info = get_dataset_info(\"meta-math/MetaMathQA\")\n gsm8k_info = get_dataset_info(\"openai/gsm8k\")\n model = get_model(\n model_id=train_config.model_id,\n dtype=train_config.dtype,\n compile=train_config.compile,\n attn_implementation=train_config.attn_implementation,\n peft_config=peft_config,\n autocast_adapter_dtype=train_config.autocast_adapter_dtype,\n )\n print_verbose(model)\n\n # train model\n train_result = train(\n model=model,\n max_steps=train_config.max_steps,\n batch_size=train_config.batch_size,\n batch_size_eval=train_config.batch_size_eval,\n tokenizer=tokenizer,\n accelerator_memory_init=accelerator_memory_init,\n eval_steps=train_config.eval_steps,\n generation_kwargs=train_config.generation_kwargs,\n grad_norm_clip=train_config.grad_norm_clip,\n optimizer_type=train_config.optimizer_type,\n optimizer_kwargs=train_config.optimizer_kwargs,\n query_template=train_config.query_template,\n lr_scheduler_arg=train_config.lr_scheduler,\n use_amp=train_config.use_amp,\n is_adalora=isinstance(peft_config, AdaLoraConfig),\n )\n\n if train_result.status == TrainStatus.FAILED:\n print_verbose(\"Training failed, not logging results\")\n sys.exit(1)\n\n file_size = get_file_size(\n model,\n peft_config=peft_config,\n clean=clean,\n print_fn=print_verbose,\n )\n\n time_total = time.perf_counter() - tic_total\n # log results: print and save to file\n log_results(\n experiment_name=experiment_name,\n train_result=train_result,\n accelerator_memory_init=accelerator_memory_init,\n time_total=time_total,\n file_size=file_size,\n model_info=model_info,\n datasets_info={\"metamath\": metamath_info, \"gsm8k\": gsm8k_info},\n start_date=start_date,\n train_config=train_config,\n peft_config=peft_config,\n print_fn=print_verbose,\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Enable verbose output\")\n parser.add_argument(\"path_experiment\", type=str, help=\"Path to the experiment directory\")\n parser.add_argument(\n \"--clean\",\n action=\"store_true\",\n help=\"Delete training artifacts after run finishes (logs are still saved)\",\n )\n args = parser.parse_args()\n\n experiment_name = validate_experiment_path(args.path_experiment)\n\n if args.verbose:\n\n def print_verbose(*args, **kwargs) -> None:\n kwargs[\"file\"] = sys.stderr\n print(*args, **kwargs)\n else:\n\n def print_verbose(*args, **kwargs) -> None:\n pass\n\n main(\n path_experiment=args.path_experiment,\n experiment_name=experiment_name,\n clean=args.clean,\n )","source_hash":"67e991c2fdb0b9aaf287b75353af3155b9a4804ec069dd9627957e82a1ab7e96","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:run.get_generation_config","uri":"program://MetaMathQA/function/run.get_generation_config#L69-L79","kind":"function","name":"get_generation_config","path":"run.py","language":"python","start_line":69,"end_line":79,"context_start_line":49,"context_end_line":99,"code":" get_peft_branch,\n get_tokenizer,\n get_train_config,\n init_accelerator,\n log_results,\n validate_experiment_path,\n)\n\nfrom peft import AdaLoraConfig, PeftConfig\nfrom peft.utils import CONFIG_NAME, infer_device\n\n\n# # suppress all warnings\n# warnings.filterwarnings(\"ignore\") # FIXME?\n\ndtype_to_bytes_linear = {\"float32\": 4, \"float16\": 2, \"bfloat16\": 2, \"int8\": 1, \"int4\": 0.5}\n# if lr scheduler with warmup is used, the ratio of warmup steps to total steps\nBUCKET_FACTOR = 20 # number of batches per bucket, increasing this further has diminishing returns\n\n\ndef get_generation_config(*, seq_len, generate_kwargs) -> GenerationConfig:\n # filter out None values so that we don't depend on setting correct defaults in the config\n generation_kwargs = {k: v for k, v in generate_kwargs.items() if v is not None}\n if (\"max_length\" in generation_kwargs) and (\"max_new_tokens\" in generation_kwargs):\n # transformers does not support setting both max_length and max_new_tokens, but what we want in this case is to\n # take the smaller of the two values\n new_max_length = min(generation_kwargs[\"max_new_tokens\"] + seq_len, generation_kwargs[\"max_length\"])\n del generation_kwargs[\"max_new_tokens\"]\n generation_kwargs[\"max_length\"] = new_max_length\n generation_config = GenerationConfig(**generate_kwargs)\n return generation_config\n\n\ndef evaluate(model, tokenizer, ds, batch_size, generate_kwargs, use_tqdm: bool = False) -> tuple[list[str], list[str]]:\n with torch.inference_mode():\n predictions = []\n responses = []\n pbar = range(0, len(ds), batch_size)\n if use_tqdm:\n pbar = tqdm(pbar)\n for j in pbar:\n sliced = ds[j : j + batch_size]\n responses += sliced.pop(\"response\")\n batch = tokenizer.pad(sliced, return_tensors=\"pt\", padding_side=\"left\").to(model.device)\n seq_len = batch[\"input_ids\"].shape[1]\n generation_config = get_generation_config(seq_len=seq_len, generate_kwargs=generate_kwargs)\n outputs = model.generate(**batch, generation_config=generation_config, pad_token_id=tokenizer.eos_token_id)\n predictions += tokenizer.batch_decode(outputs, skip_special_tokens=True)\n return predictions, responses\n\n","source_hash":"67e991c2fdb0b9aaf287b75353af3155b9a4804ec069dd9627957e82a1ab7e96","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:run.evaluate","uri":"program://MetaMathQA/function/run.evaluate#L82-L97","kind":"function","name":"evaluate","path":"run.py","language":"python","start_line":82,"end_line":97,"context_start_line":62,"context_end_line":117,"code":"# warnings.filterwarnings(\"ignore\") # FIXME?\n\ndtype_to_bytes_linear = {\"float32\": 4, \"float16\": 2, \"bfloat16\": 2, \"int8\": 1, \"int4\": 0.5}\n# if lr scheduler with warmup is used, the ratio of warmup steps to total steps\nBUCKET_FACTOR = 20 # number of batches per bucket, increasing this further has diminishing returns\n\n\ndef get_generation_config(*, seq_len, generate_kwargs) -> GenerationConfig:\n # filter out None values so that we don't depend on setting correct defaults in the config\n generation_kwargs = {k: v for k, v in generate_kwargs.items() if v is not None}\n if (\"max_length\" in generation_kwargs) and (\"max_new_tokens\" in generation_kwargs):\n # transformers does not support setting both max_length and max_new_tokens, but what we want in this case is to\n # take the smaller of the two values\n new_max_length = min(generation_kwargs[\"max_new_tokens\"] + seq_len, generation_kwargs[\"max_length\"])\n del generation_kwargs[\"max_new_tokens\"]\n generation_kwargs[\"max_length\"] = new_max_length\n generation_config = GenerationConfig(**generate_kwargs)\n return generation_config\n\n\ndef evaluate(model, tokenizer, ds, batch_size, generate_kwargs, use_tqdm: bool = False) -> tuple[list[str], list[str]]:\n with torch.inference_mode():\n predictions = []\n responses = []\n pbar = range(0, len(ds), batch_size)\n if use_tqdm:\n pbar = tqdm(pbar)\n for j in pbar:\n sliced = ds[j : j + batch_size]\n responses += sliced.pop(\"response\")\n batch = tokenizer.pad(sliced, return_tensors=\"pt\", padding_side=\"left\").to(model.device)\n seq_len = batch[\"input_ids\"].shape[1]\n generation_config = get_generation_config(seq_len=seq_len, generate_kwargs=generate_kwargs)\n outputs = model.generate(**batch, generation_config=generation_config, pad_token_id=tokenizer.eos_token_id)\n predictions += tokenizer.batch_decode(outputs, skip_special_tokens=True)\n return predictions, responses\n\n\nclass DummyGradScaler:\n # if no mixed precision is being used\n def scale(self, loss):\n return loss\n\n def unscale_(self, optimizer):\n pass\n\n def step(self, optimizer):\n optimizer.step()\n\n def update(self):\n pass\n\n\ndef train(\n *,\n model: nn.Module,","source_hash":"67e991c2fdb0b9aaf287b75353af3155b9a4804ec069dd9627957e82a1ab7e96","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:run.DummyGradScaler","uri":"program://MetaMathQA/class/run.DummyGradScaler#L100-L112","kind":"class","name":"DummyGradScaler","path":"run.py","language":"python","start_line":100,"end_line":112,"context_start_line":80,"context_end_line":132,"code":"\n\ndef evaluate(model, tokenizer, ds, batch_size, generate_kwargs, use_tqdm: bool = False) -> tuple[list[str], list[str]]:\n with torch.inference_mode():\n predictions = []\n responses = []\n pbar = range(0, len(ds), batch_size)\n if use_tqdm:\n pbar = tqdm(pbar)\n for j in pbar:\n sliced = ds[j : j + batch_size]\n responses += sliced.pop(\"response\")\n batch = tokenizer.pad(sliced, return_tensors=\"pt\", padding_side=\"left\").to(model.device)\n seq_len = batch[\"input_ids\"].shape[1]\n generation_config = get_generation_config(seq_len=seq_len, generate_kwargs=generate_kwargs)\n outputs = model.generate(**batch, generation_config=generation_config, pad_token_id=tokenizer.eos_token_id)\n predictions += tokenizer.batch_decode(outputs, skip_special_tokens=True)\n return predictions, responses\n\n\nclass DummyGradScaler:\n # if no mixed precision is being used\n def scale(self, loss):\n return loss\n\n def unscale_(self, optimizer):\n pass\n\n def step(self, optimizer):\n optimizer.step()\n\n def update(self):\n pass\n\n\ndef train(\n *,\n model: nn.Module,\n max_steps: int,\n batch_size: int,\n batch_size_eval: int,\n tokenizer: Any,\n accelerator_memory_init: int,\n eval_steps: int,\n generation_kwargs: dict[str, Any],\n grad_norm_clip: float,\n optimizer_type: str,\n optimizer_kwargs: dict[str, Any],\n query_template: str,\n lr_scheduler_arg: Optional[Literal[\"cosine\"]],\n use_amp: bool,\n is_adalora: bool,\n) -> TrainResult:","source_hash":"67e991c2fdb0b9aaf287b75353af3155b9a4804ec069dd9627957e82a1ab7e96","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:run.train","uri":"program://MetaMathQA/function/run.train#L115-L357","kind":"function","name":"train","path":"run.py","language":"python","start_line":115,"end_line":357,"context_start_line":95,"context_end_line":377,"code":" outputs = model.generate(**batch, generation_config=generation_config, pad_token_id=tokenizer.eos_token_id)\n predictions += tokenizer.batch_decode(outputs, skip_special_tokens=True)\n return predictions, responses\n\n\nclass DummyGradScaler:\n # if no mixed precision is being used\n def scale(self, loss):\n return loss\n\n def unscale_(self, optimizer):\n pass\n\n def step(self, optimizer):\n optimizer.step()\n\n def update(self):\n pass\n\n\ndef train(\n *,\n model: nn.Module,\n max_steps: int,\n batch_size: int,\n batch_size_eval: int,\n tokenizer: Any,\n accelerator_memory_init: int,\n eval_steps: int,\n generation_kwargs: dict[str, Any],\n grad_norm_clip: float,\n optimizer_type: str,\n optimizer_kwargs: dict[str, Any],\n query_template: str,\n lr_scheduler_arg: Optional[Literal[\"cosine\"]],\n use_amp: bool,\n is_adalora: bool,\n) -> TrainResult:\n accelerator_memory_allocated_log = []\n accelerator_memory_reserved_log = []\n losses = []\n durations = []\n metrics = []\n sample = 0 # keep count of the current sample\n total_samples = 0 # total number of samples over all epochs\n total_tokens = [] # total number of tokens over all epochs\n\n device_type = infer_device()\n torch_accelerator_module = getattr(torch, device_type, torch.cuda)\n if use_amp:\n grad_scaler: GradScaler | DummyGradScaler = GradScaler(device=device_type)\n autocast_ctx: Callable[[], ContextManager[Any]] = partial(autocast, device_type=device_type)\n else:\n grad_scaler = DummyGradScaler()\n autocast_ctx = nullcontext\n\n optimizer, lr_scheduler = get_optimizer_and_scheduler(\n model,\n optimizer_type=optimizer_type,\n max_steps=max_steps,\n lr_scheduler_arg=lr_scheduler_arg,\n **optimizer_kwargs,\n )\n # print this after getting the optimizer, in case it modifies requires_gard\n if hasattr(model, \"get_nb_trainable_parameters\"):\n num_trainable_params, num_params = model.get_nb_trainable_parameters()\n else:\n num_params = model.num_parameters()\n num_trainable_params = num_params\n print_verbose(\n f\"trainable params: {num_trainable_params:,d} || all params: {num_params:,d} || \"\n f\"trainable: {100 * num_trainable_params / num_params:.4f}%\"\n )\n\n status = TrainStatus.FAILED\n tic_train = time.perf_counter()\n eval_time = 0.0\n error_msg = \"\"\n\n ds_train, ds_valid, ds_test = get_train_valid_test_datasets(\n tokenizer=tokenizer, query_template=query_template, print_fn=print_verbose\n )\n # note: bucketing by length is only really worth it for the train dataset, since it's length is big compared to the\n # batch size\n iterator_train = BucketIterator(\n ds_train,\n batch_size=batch_size,\n bucket_factor=BUCKET_FACTOR,\n delete_cols=[\"response\"],\n )\n try:\n pbar = tqdm(range(1, max_steps + 1))\n for step, batch in zip(pbar, iterator_train):\n tic = time.perf_counter()\n\n # create the batch\n tokens_per_sample = [len(i) for i in batch[\"input_ids\"]]\n total_tokens.append(sum(tokens_per_sample) + len(tokens_per_sample)) # add EOS token\n batch = tokenizer.pad(batch, return_tensors=\"pt\").to(model.device)\n actual_batch_size = len(batch[\"input_ids\"])\n total_samples += actual_batch_size\n sample += batch_size\n if sample >= len(ds_train): # new epoch\n sample = 0\n\n # add labels, they are automatically shifted by transformers\n labels = batch[\"input_ids\"].clone()\n # We want to ignore the padding tokens except for the first EOS token; if we don't ignore them, the loss\n # will be dominated by padding tokens; if we ignore all, the model will not learn to predict the EOS token.\n # TODO: Note that the longest sequence in the batch won't have any PAD/EOS token at the end, this is fine if\n # the batch size is > 1 but should still be fixed eventually.\n for i, num_tokens in enumerate(tokens_per_sample):\n labels[i, num_tokens + 1 :] = -100\n batch[\"labels\"] = labels\n num_items_in_batch = batch[\"attention_mask\"].sum().item()\n\n # train step\n optimizer.zero_grad()\n with autocast_ctx():\n outputs = model(**batch, num_items_in_batch=num_items_in_batch)\n loss = outputs.loss\n grad_scaler.scale(loss).backward()\n if grad_norm_clip:\n grad_scaler.unscale_(optimizer)\n torch.nn.utils.clip_grad_norm_(model.parameters(), grad_norm_clip)\n grad_scaler.step(optimizer)\n grad_scaler.update()\n lr_scheduler.step()\n\n if is_adalora:\n model.base_model.update_and_allocate(step)\n\n losses.append(loss.item())\n pbar.set_postfix({\"loss\": loss.item()})\n accelerator_memory_allocated_log.append(\n torch_accelerator_module.memory_allocated() - accelerator_memory_init\n )\n accelerator_memory_reserved_log.append(\n torch_accelerator_module.memory_reserved() - accelerator_memory_init\n )\n toc = time.perf_counter()\n durations.append(toc - tic)\n\n # every couple of steps, evaluate; this can be slow due to generation\n if step % eval_steps == 0:\n tic_eval = time.perf_counter()\n loss_avg = sum(losses[-eval_steps:]) / eval_steps\n memory_allocated_avg = sum(accelerator_memory_allocated_log[-eval_steps:]) / eval_steps\n memory_reserved_avg = sum(accelerator_memory_reserved_log[-eval_steps:]) / eval_steps\n token_sum = sum(total_tokens[-eval_steps:])\n dur_train = sum(durations[-eval_steps:])\n tokens_per_sec = token_sum / dur_train\n\n model.eval()\n predictions, responses = evaluate(\n model=model,\n tokenizer=tokenizer,\n ds=ds_valid,\n batch_size=batch_size_eval,\n generate_kwargs={**generation_kwargs},\n )\n model.train()\n\n example = random.choice(predictions)\n example = textwrap.shorten(example, width=750)\n example = textwrap.indent(example, \" \")\n print_verbose(f\"\\nExample prediction:\\n{example}\\n\")\n accuracy = get_accuracy(predictions=predictions, responses=responses)\n num_tokens_generated = sum(sum(mask) for mask in tokenizer(predictions)[\"attention_mask\"])\n\n toc_eval = time.perf_counter()\n dur_eval = toc_eval - tic_eval\n eval_time += toc_eval - tic_eval\n elapsed = time.perf_counter() - tic_train\n\n metrics.append(\n {\n \"step\": step,\n \"valid accuracy\": accuracy,\n \"train loss\": loss_avg,\n \"train samples\": total_samples,\n \"train time\": dur_train,\n \"eval time\": dur_eval,\n \"tokens / sec\": tokens_per_sec,\n \"mem allocated avg\": memory_allocated_avg,\n \"mem reserved avg\": memory_reserved_avg,\n \"elapsed time\": elapsed,\n }\n )\n\n log_dict = {\n \"step\": f\"{step:5d}\",\n \"samples\": f\"{total_samples:7d}\",\n \"lr\": f\"{lr_scheduler.get_last_lr()[0]:.2e}\",\n \"loss avg\": f\"{loss_avg:.4f}\",\n \"valid acc\": f\"{accuracy:.3f}\",\n \"gen valid tokens\": num_tokens_generated,\n \"train time\": f\"{dur_train:.1f}s\",\n \"eval time\": f\"{dur_eval:.1f}s\",\n \"train tokens / sec\": f\"{tokens_per_sec:.0f}\",\n \"mem allocated\": f\"{memory_allocated_avg:.0f}\",\n \"mem reserved\": f\"{memory_reserved_avg:.0f}\",\n \"elapsed time\": f\"{elapsed // 60:.0f}min {elapsed % 60:.0f}s\",\n }\n print_verbose(json.dumps(log_dict))\n\n # # TODO is this needed?\n torch_accelerator_module.empty_cache()\n gc.collect()\n\n print_verbose(f\"Training finished after {max_steps} steps, evaluation on test set follows.\")\n # test set evaluation\n model.eval()\n predictions, responses = evaluate(\n model=model,\n tokenizer=tokenizer,\n ds=ds_test,\n batch_size=batch_size_eval,\n generate_kwargs={**generation_kwargs, \"pad_token_id\": tokenizer.eos_token_id},\n use_tqdm=len(ds_test) > 100,\n )\n accuracy = get_accuracy(predictions=predictions, responses=responses)\n metrics.append(\n {\n \"step\": step,\n \"test accuracy\": accuracy,\n \"train loss\": sum(losses[-eval_steps:]) / eval_steps,\n \"train samples\": total_samples,\n \"train total tokens\": sum(total_tokens),\n }\n )\n print_verbose(f\"Test accuracy: {accuracy:.3f}\")\n\n except KeyboardInterrupt:\n print_verbose(\"canceled training\")\n status = TrainStatus.CANCELED\n error_msg = \"manually canceled\"\n except torch.OutOfMemoryError as exc:\n # ouch, still let's try to log some results\n print_verbose(\"out of memory error encountered\")\n status = TrainStatus.CANCELED\n error_msg = str(exc)\n except Exception as exc:\n print_verbose(f\"encountered an error: {exc}\")\n status = TrainStatus.CANCELED\n error_msg = str(exc)\n\n toc_train = time.perf_counter()\n train_time = toc_train - tic_train - eval_time\n\n if status != TrainStatus.CANCELED:\n status = TrainStatus.SUCCESS\n train_result = TrainResult(\n status=status,\n train_time=train_time,\n accelerator_memory_reserved_log=accelerator_memory_reserved_log,\n losses=losses,\n metrics=metrics,\n error_msg=error_msg,\n num_trainable_params=num_trainable_params,\n num_total_params=num_params,\n )\n return train_result\n\n\ndef main(*, path_experiment: str, experiment_name: str, clean: bool) -> None:\n tic_total = time.perf_counter()\n start_date = dt.datetime.now(tz=dt.timezone.utc).replace(microsecond=0).isoformat()\n\n peft_branch = get_peft_branch()\n if peft_branch == \"main\":\n print_verbose(\"===== This experiment is categorized as a MAIN run because the PEFT branch is 'main' ======\")\n else:\n print_verbose(\n f\"===== This experiment is categorized as a TEST run because the PEFT branch is '{peft_branch}' ======\"\n )\n\n # load configs\n peft_config: Optional[PeftConfig] = None\n if os.path.exists(os.path.join(path_experiment, CONFIG_NAME)):\n peft_config = PeftConfig.from_pretrained(path_experiment)\n else:\n print_verbose(f\"Could not find PEFT config at {path_experiment}, performing FULL FINETUNING\")","source_hash":"67e991c2fdb0b9aaf287b75353af3155b9a4804ec069dd9627957e82a1ab7e96","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:run.main","uri":"program://MetaMathQA/function/run.main#L360-L443","kind":"function","name":"main","path":"run.py","language":"python","start_line":360,"end_line":443,"context_start_line":340,"context_end_line":463,"code":" error_msg = str(exc)\n\n toc_train = time.perf_counter()\n train_time = toc_train - tic_train - eval_time\n\n if status != TrainStatus.CANCELED:\n status = TrainStatus.SUCCESS\n train_result = TrainResult(\n status=status,\n train_time=train_time,\n accelerator_memory_reserved_log=accelerator_memory_reserved_log,\n losses=losses,\n metrics=metrics,\n error_msg=error_msg,\n num_trainable_params=num_trainable_params,\n num_total_params=num_params,\n )\n return train_result\n\n\ndef main(*, path_experiment: str, experiment_name: str, clean: bool) -> None:\n tic_total = time.perf_counter()\n start_date = dt.datetime.now(tz=dt.timezone.utc).replace(microsecond=0).isoformat()\n\n peft_branch = get_peft_branch()\n if peft_branch == \"main\":\n print_verbose(\"===== This experiment is categorized as a MAIN run because the PEFT branch is 'main' ======\")\n else:\n print_verbose(\n f\"===== This experiment is categorized as a TEST run because the PEFT branch is '{peft_branch}' ======\"\n )\n\n # load configs\n peft_config: Optional[PeftConfig] = None\n if os.path.exists(os.path.join(path_experiment, CONFIG_NAME)):\n peft_config = PeftConfig.from_pretrained(path_experiment)\n else:\n print_verbose(f\"Could not find PEFT config at {path_experiment}, performing FULL FINETUNING\")\n path_train_config = os.path.join(path_experiment, FILE_NAME_TRAIN_PARAMS)\n train_config = get_train_config(path_train_config)\n set_seed(train_config.seed)\n\n # initialize objects\n accelerator_memory_init = init_accelerator()\n tokenizer = get_tokenizer(model_id=train_config.model_id, max_seq_length=train_config.max_seq_length)\n\n model_info = get_base_model_info(train_config.model_id)\n metamath_info = get_dataset_info(\"meta-math/MetaMathQA\")\n gsm8k_info = get_dataset_info(\"openai/gsm8k\")\n model = get_model(\n model_id=train_config.model_id,\n dtype=train_config.dtype,\n compile=train_config.compile,\n attn_implementation=train_config.attn_implementation,\n peft_config=peft_config,\n autocast_adapter_dtype=train_config.autocast_adapter_dtype,\n )\n print_verbose(model)\n\n # train model\n train_result = train(\n model=model,\n max_steps=train_config.max_steps,\n batch_size=train_config.batch_size,\n batch_size_eval=train_config.batch_size_eval,\n tokenizer=tokenizer,\n accelerator_memory_init=accelerator_memory_init,\n eval_steps=train_config.eval_steps,\n generation_kwargs=train_config.generation_kwargs,\n grad_norm_clip=train_config.grad_norm_clip,\n optimizer_type=train_config.optimizer_type,\n optimizer_kwargs=train_config.optimizer_kwargs,\n query_template=train_config.query_template,\n lr_scheduler_arg=train_config.lr_scheduler,\n use_amp=train_config.use_amp,\n is_adalora=isinstance(peft_config, AdaLoraConfig),\n )\n\n if train_result.status == TrainStatus.FAILED:\n print_verbose(\"Training failed, not logging results\")\n sys.exit(1)\n\n file_size = get_file_size(\n model,\n peft_config=peft_config,\n clean=clean,\n print_fn=print_verbose,\n )\n\n time_total = time.perf_counter() - tic_total\n # log results: print and save to file\n log_results(\n experiment_name=experiment_name,\n train_result=train_result,\n accelerator_memory_init=accelerator_memory_init,\n time_total=time_total,\n file_size=file_size,\n model_info=model_info,\n datasets_info={\"metamath\": metamath_info, \"gsm8k\": gsm8k_info},\n start_date=start_date,\n train_config=train_config,\n peft_config=peft_config,\n print_fn=print_verbose,\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Enable verbose output\")\n parser.add_argument(\"path_experiment\", type=str, help=\"Path to the experiment directory\")\n parser.add_argument(\n \"--clean\",\n action=\"store_true\",\n help=\"Delete training artifacts after run finishes (logs are still saved)\",\n )\n args = parser.parse_args()\n\n experiment_name = validate_experiment_path(args.path_experiment)\n\n if args.verbose:\n\n def print_verbose(*args, **kwargs) -> None:\n kwargs[\"file\"] = sys.stderr\n print(*args, **kwargs)","source_hash":"67e991c2fdb0b9aaf287b75353af3155b9a4804ec069dd9627957e82a1ab7e96","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:run.scale","uri":"program://MetaMathQA/function/run.scale#L102-L103","kind":"function","name":"scale","path":"run.py","language":"python","start_line":102,"end_line":103,"context_start_line":82,"context_end_line":123,"code":"def evaluate(model, tokenizer, ds, batch_size, generate_kwargs, use_tqdm: bool = False) -> tuple[list[str], list[str]]:\n with torch.inference_mode():\n predictions = []\n responses = []\n pbar = range(0, len(ds), batch_size)\n if use_tqdm:\n pbar = tqdm(pbar)\n for j in pbar:\n sliced = ds[j : j + batch_size]\n responses += sliced.pop(\"response\")\n batch = tokenizer.pad(sliced, return_tensors=\"pt\", padding_side=\"left\").to(model.device)\n seq_len = batch[\"input_ids\"].shape[1]\n generation_config = get_generation_config(seq_len=seq_len, generate_kwargs=generate_kwargs)\n outputs = model.generate(**batch, generation_config=generation_config, pad_token_id=tokenizer.eos_token_id)\n predictions += tokenizer.batch_decode(outputs, skip_special_tokens=True)\n return predictions, responses\n\n\nclass DummyGradScaler:\n # if no mixed precision is being used\n def scale(self, loss):\n return loss\n\n def unscale_(self, optimizer):\n pass\n\n def step(self, optimizer):\n optimizer.step()\n\n def update(self):\n pass\n\n\ndef train(\n *,\n model: nn.Module,\n max_steps: int,\n batch_size: int,\n batch_size_eval: int,\n tokenizer: Any,\n accelerator_memory_init: int,\n eval_steps: int,","source_hash":"67e991c2fdb0b9aaf287b75353af3155b9a4804ec069dd9627957e82a1ab7e96","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:run.unscale_","uri":"program://MetaMathQA/function/run.unscale_#L105-L106","kind":"function","name":"unscale_","path":"run.py","language":"python","start_line":105,"end_line":106,"context_start_line":85,"context_end_line":126,"code":" responses = []\n pbar = range(0, len(ds), batch_size)\n if use_tqdm:\n pbar = tqdm(pbar)\n for j in pbar:\n sliced = ds[j : j + batch_size]\n responses += sliced.pop(\"response\")\n batch = tokenizer.pad(sliced, return_tensors=\"pt\", padding_side=\"left\").to(model.device)\n seq_len = batch[\"input_ids\"].shape[1]\n generation_config = get_generation_config(seq_len=seq_len, generate_kwargs=generate_kwargs)\n outputs = model.generate(**batch, generation_config=generation_config, pad_token_id=tokenizer.eos_token_id)\n predictions += tokenizer.batch_decode(outputs, skip_special_tokens=True)\n return predictions, responses\n\n\nclass DummyGradScaler:\n # if no mixed precision is being used\n def scale(self, loss):\n return loss\n\n def unscale_(self, optimizer):\n pass\n\n def step(self, optimizer):\n optimizer.step()\n\n def update(self):\n pass\n\n\ndef train(\n *,\n model: nn.Module,\n max_steps: int,\n batch_size: int,\n batch_size_eval: int,\n tokenizer: Any,\n accelerator_memory_init: int,\n eval_steps: int,\n generation_kwargs: dict[str, Any],\n grad_norm_clip: float,\n optimizer_type: str,","source_hash":"67e991c2fdb0b9aaf287b75353af3155b9a4804ec069dd9627957e82a1ab7e96","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:run.step","uri":"program://MetaMathQA/function/run.step#L108-L109","kind":"function","name":"step","path":"run.py","language":"python","start_line":108,"end_line":109,"context_start_line":88,"context_end_line":129,"code":" pbar = tqdm(pbar)\n for j in pbar:\n sliced = ds[j : j + batch_size]\n responses += sliced.pop(\"response\")\n batch = tokenizer.pad(sliced, return_tensors=\"pt\", padding_side=\"left\").to(model.device)\n seq_len = batch[\"input_ids\"].shape[1]\n generation_config = get_generation_config(seq_len=seq_len, generate_kwargs=generate_kwargs)\n outputs = model.generate(**batch, generation_config=generation_config, pad_token_id=tokenizer.eos_token_id)\n predictions += tokenizer.batch_decode(outputs, skip_special_tokens=True)\n return predictions, responses\n\n\nclass DummyGradScaler:\n # if no mixed precision is being used\n def scale(self, loss):\n return loss\n\n def unscale_(self, optimizer):\n pass\n\n def step(self, optimizer):\n optimizer.step()\n\n def update(self):\n pass\n\n\ndef train(\n *,\n model: nn.Module,\n max_steps: int,\n batch_size: int,\n batch_size_eval: int,\n tokenizer: Any,\n accelerator_memory_init: int,\n eval_steps: int,\n generation_kwargs: dict[str, Any],\n grad_norm_clip: float,\n optimizer_type: str,\n optimizer_kwargs: dict[str, Any],\n query_template: str,\n lr_scheduler_arg: Optional[Literal[\"cosine\"]],","source_hash":"67e991c2fdb0b9aaf287b75353af3155b9a4804ec069dd9627957e82a1ab7e96","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:run.update","uri":"program://MetaMathQA/function/run.update#L111-L112","kind":"function","name":"update","path":"run.py","language":"python","start_line":111,"end_line":112,"context_start_line":91,"context_end_line":132,"code":" responses += sliced.pop(\"response\")\n batch = tokenizer.pad(sliced, return_tensors=\"pt\", padding_side=\"left\").to(model.device)\n seq_len = batch[\"input_ids\"].shape[1]\n generation_config = get_generation_config(seq_len=seq_len, generate_kwargs=generate_kwargs)\n outputs = model.generate(**batch, generation_config=generation_config, pad_token_id=tokenizer.eos_token_id)\n predictions += tokenizer.batch_decode(outputs, skip_special_tokens=True)\n return predictions, responses\n\n\nclass DummyGradScaler:\n # if no mixed precision is being used\n def scale(self, loss):\n return loss\n\n def unscale_(self, optimizer):\n pass\n\n def step(self, optimizer):\n optimizer.step()\n\n def update(self):\n pass\n\n\ndef train(\n *,\n model: nn.Module,\n max_steps: int,\n batch_size: int,\n batch_size_eval: int,\n tokenizer: Any,\n accelerator_memory_init: int,\n eval_steps: int,\n generation_kwargs: dict[str, Any],\n grad_norm_clip: float,\n optimizer_type: str,\n optimizer_kwargs: dict[str, Any],\n query_template: str,\n lr_scheduler_arg: Optional[Literal[\"cosine\"]],\n use_amp: bool,\n is_adalora: bool,\n) -> TrainResult:","source_hash":"67e991c2fdb0b9aaf287b75353af3155b9a4804ec069dd9627957e82a1ab7e96","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:run.print_verbose","uri":"program://MetaMathQA/function/run.print_verbose#L466-L467","kind":"function","name":"print_verbose","path":"run.py","language":"python","start_line":466,"end_line":467,"context_start_line":446,"context_end_line":473,"code":"if __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Enable verbose output\")\n parser.add_argument(\"path_experiment\", type=str, help=\"Path to the experiment directory\")\n parser.add_argument(\n \"--clean\",\n action=\"store_true\",\n help=\"Delete training artifacts after run finishes (logs are still saved)\",\n )\n args = parser.parse_args()\n\n experiment_name = validate_experiment_path(args.path_experiment)\n\n if args.verbose:\n\n def print_verbose(*args, **kwargs) -> None:\n kwargs[\"file\"] = sys.stderr\n print(*args, **kwargs)\n else:\n\n def print_verbose(*args, **kwargs) -> None:\n pass\n\n main(\n path_experiment=args.path_experiment,\n experiment_name=experiment_name,\n clean=args.clean,\n )","source_hash":"67e991c2fdb0b9aaf287b75353af3155b9a4804ec069dd9627957e82a1ab7e96","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils","uri":"program://MetaMathQA/module/utils#L1-L710","kind":"module","name":"utils","path":"utils.py","language":"python","start_line":1,"end_line":710,"context_start_line":1,"context_end_line":710,"code":"# Copyright 2025-present the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nAll utilities not related to data handling.\n\"\"\"\n\nimport enum\nimport json\nimport os\nimport platform\nimport subprocess\nimport tempfile\nimport warnings\nfrom dataclasses import asdict, dataclass\nfrom decimal import Decimal, DivisionByZero, InvalidOperation\nfrom typing import Any, Callable, Literal, Optional\n\nimport bitsandbytes\nimport datasets\nimport huggingface_hub\nimport numpy as np\nimport torch\nimport transformers\nfrom torch import nn\nfrom transformers import (\n AutoModelForCausalLM,\n AutoTokenizer,\n BitsAndBytesConfig,\n get_cosine_schedule_with_warmup,\n)\n\nimport peft\nfrom peft import PeftConfig, get_peft_model, prepare_model_for_kbit_training\nfrom peft.optimizers import create_lorafa_optimizer, create_loraplus_optimizer\nfrom peft.utils import SAFETENSORS_WEIGHTS_NAME, infer_device\n\n\ndevice = infer_device()\n\nif device not in [\"cuda\", \"xpu\"]:\n raise RuntimeError(\"CUDA or XPU is not available, currently only CUDA or XPU is supported\")\n\nACCELERATOR_MEMORY_INIT_THRESHOLD = 500 * 2**20 # 500MB\nFILE_NAME_DEFAULT_TRAIN_PARAMS = os.path.join(os.path.dirname(__file__), \"default_training_params.json\")\nFILE_NAME_TRAIN_PARAMS = \"training_params.json\" # specific params for this experiment\n# main results\nRESULT_PATH = os.path.join(os.path.dirname(__file__), \"results\")\n# testing results\nRESULT_PATH_TEST = os.path.join(os.path.dirname(__file__), \"temporary_results\")\n# cancelled results\nRESULT_PATH_CANCELLED = os.path.join(os.path.dirname(__file__), \"cancelled_results\")\nhf_api = huggingface_hub.HfApi()\nWARMUP_STEP_RATIO = 0.1\n\n\n@dataclass\nclass TrainConfig:\n \"\"\"All configuration parameters associated with training the model\n\n Args:\n model_id: The model identifier\n dtype: The data type to use for the model\n max_seq_length: The maximum sequence length\n batch_size: The batch size for training\n batch_size_eval: The batch size for eval/test, can be much higher than for training\n max_steps: The maximum number of steps to train for\n eval_steps: The number of steps between evaluations\n compile: Whether to compile the model\n query_template: The template for the query\n seed: The random seed\n grad_norm_clip: The gradient norm clipping value (set to 0 to skip)\n optimizer_type: The name of a torch optimizer (e.g. AdamW) or a PEFT method (\"lora+\", \"lora-fa\")\n optimizer_kwargs: The optimizer keyword arguments (lr etc.)\n lr_scheduler: The learning rate scheduler (currently only None or 'cosine' are supported)\n use_amp: Whether to use automatic mixed precision\n autocast_adapter_dtype: Whether to cast adapter dtype to float32, same argument as in PEFT\n generation_kwargs: Arguments passed to transformers GenerationConfig (used in evaluation)\n attn_implementation: The attention implementation to use (if any), see transformers docs\n \"\"\"\n\n model_id: str\n dtype: Literal[\"float32\", \"float16\", \"bfloat16\", \"int8\", \"int4\"]\n max_seq_length: int\n batch_size: int\n batch_size_eval: int\n max_steps: int\n eval_steps: int\n compile: bool\n query_template: str\n seed: int\n grad_norm_clip: float # set to 0 to skip\n optimizer_type: str\n optimizer_kwargs: dict[str, Any]\n lr_scheduler: Optional[Literal[\"cosine\"]]\n use_amp: bool\n autocast_adapter_dtype: bool\n generation_kwargs: dict[str, Any]\n attn_implementation: Optional[str]\n\n def __post_init__(self) -> None:\n if not isinstance(self.model_id, str):\n raise ValueError(f\"Invalid model_id: {self.model_id}\")\n if self.dtype not in [\"float32\", \"float16\", \"bfloat16\", \"int8\", \"int4\"]:\n raise ValueError(f\"Invalid dtype: {self.dtype}\")\n if self.max_seq_length < 0:\n raise ValueError(f\"Invalid max_seq_length: {self.max_seq_length}\")\n if self.batch_size <= 0:\n raise ValueError(f\"Invalid batch_size: {self.batch_size}\")\n if self.batch_size_eval <= 0:\n raise ValueError(f\"Invalid eval batch_size: {self.batch_size_eval}\")\n if self.max_steps <= 0:\n raise ValueError(f\"Invalid max_steps: {self.max_steps}\")\n if self.eval_steps <= 0:\n raise ValueError(f\"Invalid eval_steps: {self.eval_steps}\")\n if self.eval_steps > self.max_steps:\n raise ValueError(f\"Invalid eval_steps: {self.eval_steps} > max_steps: {self.max_steps}\")\n if self.grad_norm_clip < 0:\n raise ValueError(f\"Invalid grad_norm_clip: {self.grad_norm_clip}\")\n if self.optimizer_type not in [\"lora+\", \"lora-fa\"] and not hasattr(torch.optim, self.optimizer_type):\n raise ValueError(f\"Invalid optimizer_type: {self.optimizer_type}\")\n if self.lr_scheduler not in [None, \"cosine\"]:\n raise ValueError(f\"Invalid lr_scheduler: {self.lr_scheduler}, must be None or 'cosine'\")\n if \"{query}\" not in self.query_template:\n raise ValueError(\"Invalid query_template, must contain '{query}'\")\n\n\ndef validate_experiment_path(path: str) -> str:\n # the experiment path should take the form of ./experiments//\n # e.g. ./experiments/lora/rank32\n # it should contain:\n # - adapter_config.json\n # - optional: training_params.json\n if not os.path.exists(FILE_NAME_DEFAULT_TRAIN_PARAMS):\n raise FileNotFoundError(\n f\"Missing default training params file '{FILE_NAME_DEFAULT_TRAIN_PARAMS}' in the ./experiments directory\"\n )\n if not os.path.exists(path):\n raise FileNotFoundError(f\"Path {path} does not exist\")\n\n # check path structure\n path_parts = path.rstrip(os.path.sep).split(os.path.sep)\n if (len(path_parts) != 3) or (path_parts[-3] != \"experiments\"):\n raise ValueError(\n f\"Path {path} does not have the correct structure, should be ./experiments//\"\n )\n\n experiment_name = os.path.join(*path_parts[-2:])\n return experiment_name\n\n\ndef get_train_config(path: str) -> TrainConfig:\n # first, load the default params, then update with experiment-specific params\n with open(FILE_NAME_DEFAULT_TRAIN_PARAMS) as f:\n default_config_kwargs = json.load(f)\n\n config_kwargs = {}\n if os.path.exists(path):\n with open(path) as f:\n config_kwargs = json.load(f)\n\n config_kwargs = {**default_config_kwargs, **config_kwargs}\n return TrainConfig(**config_kwargs)\n\n\ndef init_accelerator() -> int:\n torch_accelerator_module = getattr(torch, device, torch.cuda)\n torch.manual_seed(0)\n torch_accelerator_module.reset_peak_memory_stats()\n torch_accelerator_module.manual_seed_all(0)\n # might not be necessary, but just to be sure\n nn.Linear(1, 1).to(device)\n\n accelerator_memory_init = torch_accelerator_module.max_memory_reserved()\n if accelerator_memory_init > ACCELERATOR_MEMORY_INIT_THRESHOLD:\n raise RuntimeError(\n f\"{device} memory usage at start is too high: {accelerator_memory_init // 2**20}MB, please ensure that no other \"\n f\"processes are running on {device}.\"\n )\n\n torch_accelerator_module.reset_peak_memory_stats()\n accelerator_memory_init = torch_accelerator_module.max_memory_reserved()\n return accelerator_memory_init\n\n\ndef get_tokenizer(*, model_id: str, max_seq_length: int):\n tokenizer = AutoTokenizer.from_pretrained(model_id)\n tokenizer.model_max_length = max_seq_length\n if not tokenizer.pad_token:\n tokenizer.pad_token = tokenizer.eos_token\n return tokenizer\n\n\ndef get_base_model(\n *,\n model_id: str,\n dtype: Literal[\"float32\", \"float16\", \"bfloat16\", \"int8\", \"int4\"],\n compile: bool,\n attn_implementation: Optional[str],\n) -> nn.Module:\n kwargs: dict[str, Any] = {\n \"pretrained_model_name_or_path\": model_id,\n \"device_map\": device,\n \"attn_implementation\": attn_implementation,\n }\n if dtype == \"int4\":\n quant_config = BitsAndBytesConfig(load_in_4bit=True)\n kwargs[\"quantization_config\"] = quant_config\n elif dtype == \"int8\":\n quant_config = BitsAndBytesConfig(load_in_8bit=True)\n kwargs[\"quantization_config\"] = quant_config\n elif dtype == \"bfloat16\":\n kwargs[\"torch_dtype\"] = torch.bfloat16\n elif dtype == \"float16\":\n kwargs[\"torch_dtype\"] = torch.float16\n elif dtype != \"float32\":\n raise ValueError(f\"Invalid dtype: {dtype}\")\n\n model = AutoModelForCausalLM.from_pretrained(**kwargs)\n\n if dtype in [\"int8\", \"int4\"]:\n model = prepare_model_for_kbit_training(model)\n\n if compile:\n model = torch.compile(model)\n\n return model\n\n\ndef get_model(\n *,\n model_id: str,\n dtype: Literal[\"float32\", \"float16\", \"bfloat16\", \"int8\", \"int4\"],\n compile: bool,\n attn_implementation: Optional[str],\n peft_config: Optional[PeftConfig],\n autocast_adapter_dtype: bool,\n) -> nn.Module:\n base_model = get_base_model(\n model_id=model_id, dtype=dtype, compile=compile, attn_implementation=attn_implementation\n )\n if peft_config is None:\n model = base_model\n else:\n model = get_peft_model(base_model, peft_config, autocast_adapter_dtype=autocast_adapter_dtype)\n return model\n\n\nclass DummyScheduler:\n # if no lr scheduler is being used\n def __init__(self, lr):\n self.lr = lr\n\n def get_last_lr(self):\n return [self.lr]\n\n def step(self):\n pass\n\n\ndef get_optimizer_and_scheduler(\n model, *, optimizer_type: str, max_steps: int, lr_scheduler_arg: Optional[Literal[\"cosine\"]], **optimizer_kwargs\n) -> tuple[torch.optim.Optimizer, Any]:\n if optimizer_type == \"lora+\":\n optimizer = create_loraplus_optimizer(model, optimizer_cls=torch.optim.AdamW, **optimizer_kwargs)\n elif optimizer_type == \"lora-fa\":\n optimizer = create_lorafa_optimizer(model, **optimizer_kwargs)\n else:\n cls = getattr(torch.optim, optimizer_type)\n optimizer = cls(model.parameters(), **optimizer_kwargs)\n\n if lr_scheduler_arg == \"cosine\":\n warmup_steps = int(WARMUP_STEP_RATIO * max_steps)\n lr_scheduler = get_cosine_schedule_with_warmup(\n optimizer, num_warmup_steps=warmup_steps, num_training_steps=max_steps\n )\n elif lr_scheduler_arg is None:\n lr_scheduler = DummyScheduler(optimizer_kwargs[\"lr\"])\n else:\n raise ValueError(f\"Invalid lr_scheduler argument: {lr_scheduler_arg}\")\n\n return optimizer, lr_scheduler\n\n\nclass BucketIterator:\n \"\"\"\n Iterator that yields batches of data from a torch Dataset, grouped in buckets by sequence length\n\n The iterator will yield batches of size `batch_size`, where the samples in each batch are sorted by sequence length.\n This is done to minimize the amount of padding required for each batch. To avoid sorting the entire dataset and thus\n introducing a bias, the dataset is first split into buckets of size `batch_size * bucket_factor`.\n\n Args:\n ds: The torch Dataset to iterate over\n batch_size: The batch size\n bucket_factor: The factor by which to multiply the batch size to determine the bucket size\n delete_cols: The columns to delete from the dataset before yielding a batch\n \"\"\"\n\n def __init__(self, ds, *, batch_size: int, bucket_factor: int, delete_cols: list[str]) -> None:\n self.ds = ds\n self.batch_size = batch_size\n self.bucket_factor = bucket_factor\n self.delete_cols = set(delete_cols)\n\n assert self.bucket_factor > 0, \"bucket_factor must be greater than 0\"\n\n def _batch_iterator(self, bucket):\n tokens_per_sample_bucket = torch.tensor([len(i) for i in bucket[\"input_ids\"]])\n # sort long to short instead to encounter possible OOM errors as early as possible\n sorted = torch.argsort(tokens_per_sample_bucket, descending=True)\n cls = type(bucket) # conserve the type returned by the ds\n bucket = {k: [v[i] for i in sorted] for k, v in bucket.items() if k not in self.delete_cols}\n num_samples = len(bucket[\"input_ids\"])\n for j in range(0, num_samples, self.batch_size):\n batch = {k: v[j : j + self.batch_size] for k, v in bucket.items()}\n yield cls(batch)\n\n def __iter__(self):\n bucket_size = self.batch_size * self.bucket_factor\n for i in range(0, len(self.ds), bucket_size):\n bucket = self.ds[i : i + bucket_size]\n yield from self._batch_iterator(bucket)\n\n # if there is a remainder, we yield the last batch\n if len(self.ds) % bucket_size != 0:\n bucket = self.ds[-(len(self.ds) % bucket_size) :]\n yield from self._batch_iterator(bucket)\n\n\ndef get_file_size(\n model: nn.Module, *, peft_config: Optional[PeftConfig], clean: bool, print_fn: Callable[..., None]\n) -> int:\n file_size = 99999999 # set a default dummy value\n if peft_config is not None:\n try:\n with tempfile.TemporaryDirectory(ignore_cleanup_errors=True, delete=clean) as tmp_dir:\n model.save_pretrained(tmp_dir)\n stat = os.stat(os.path.join(tmp_dir, SAFETENSORS_WEIGHTS_NAME))\n file_size = stat.st_size\n if not clean:\n print_fn(f\"Saved PEFT checkpoint to {tmp_dir}\")\n except Exception as exc:\n print(f\"Failed to save PEFT checkpoint due to the following error: {exc}\")\n else:\n print_fn(\"Not saving the fully fine-tuned model because it's too big, estimating the size instead\")\n try:\n num_params = model.num_parameters()\n dtype_size = next(model.parameters()).element_size()\n file_size = num_params * dtype_size\n except Exception as exc:\n print(f\"Failed to determine file size for fully finetuned model because of: {exc}\")\n return file_size\n\n\n##################\n# ANSWER PARSING #\n##################\n\n\ndef parse_answer(text: str) -> Optional[str]:\n \"\"\"\n A label/prediction can look like this:\n\n Question: If the magnitude of vector v is equal to 4, what is the dot product of vector v with itself?. Think step\n by step\n Answer: The dot product of a vector with itself is equal to the square of its magnitude. So, the dot product of\n vector v with itself is equal to $4^2 = \\boxed{16}$.The answer is: 16\n\n We want to extract '16' from this string.\n\n \"\"\"\n # This implementation is based on sampling meta-llama/Llama-3.1-8B-Instruct. It may not work for other models.\n candidate_delimiters = [\n # MetaMath:\n \"The answer is: \",\n \"The answer is \",\n \"The final answer is: \",\n \"The final answer is \",\n # GSM8K:\n \"#### \",\n ]\n text = text.strip()\n text = text.rstrip(\".!?\")\n for delimiter in candidate_delimiters:\n if delimiter in text:\n break\n else: # no match\n return None\n\n text = text.rpartition(delimiter)[-1].strip()\n # if a new paragraph follows after the final answer, we want to remove it\n text = text.split(\"\\n\", 1)[0]\n # note: we can just remove % here since the GSM8K dataset just omits it, i.e. 50% -> 50, no need to divide by 100\n text = text.strip(\" .!?$%\")\n return text\n\n\ndef convert_to_decimal(s: Optional[str]) -> Optional[Decimal]:\n \"\"\"\n Converts a string representing a number to a Decimal.\n\n The string may be:\n - A simple number (e.g., \"13\", \"65.33\")\n - A fraction (e.g., \"20/14\")\n \"\"\"\n if s is None:\n return None\n\n try:\n s = s.strip()\n # Check if the string represents a fraction.\n if \"/\" in s:\n parts = s.split(\"/\")\n if len(parts) != 2:\n return None\n numerator = Decimal(parts[0].strip())\n denominator = Decimal(parts[1].strip())\n if denominator == 0:\n return None\n value = numerator / denominator\n else:\n # Parse as a regular decimal or integer string.\n value = Decimal(s)\n return value\n except (DivisionByZero, InvalidOperation, ValueError):\n return None\n\n\ndef get_accuracy(*, predictions: list[str], responses: list[str]) -> float:\n if len(predictions) != len(responses):\n raise ValueError(f\"Prediction length mismatch: {len(predictions)} != {len(responses)}\")\n\n y_true: list[str | float | None] = []\n y_pred: list[str | float | None] = []\n\n for prediction, response in zip(predictions, responses):\n parsed_prediction = parse_answer(prediction)\n parsed_response = parse_answer(response)\n if parsed_response is None:\n raise ValueError(f\"Error encountered while trying to parse response: {response}\")\n\n decimal_prediction = convert_to_decimal(parsed_prediction)\n decimal_answer = convert_to_decimal(parsed_response)\n if decimal_prediction is not None:\n y_pred.append(float(decimal_prediction))\n elif parsed_prediction is not None:\n y_pred.append(parsed_prediction)\n else:\n y_pred.append(None)\n\n # we convert decimals to float so that stuff like this works:\n # float(convert_to_decimal('20/35')) == float(convert_to_decimal('0.5714285714285714'))\n if decimal_answer is not None:\n y_true.append(float(decimal_answer))\n elif parsed_prediction is not None:\n y_true.append(parsed_response)\n else:\n y_true.append(None)\n\n correct: list[bool] = []\n for true, pred in zip(y_true, y_pred):\n if (true is not None) and (pred is not None):\n correct.append(true == pred)\n else:\n correct.append(False)\n\n accuracy = sum(correct) / len(correct)\n return accuracy\n\n\n###########\n# LOGGING #\n###########\n\n\ndef get_base_model_info(model_id: str) -> Optional[huggingface_hub.ModelInfo]:\n try:\n return hf_api.model_info(model_id)\n except Exception as exc:\n warnings.warn(f\"Could not retrieve model info, failed with error {exc}\")\n return None\n\n\ndef get_dataset_info(dataset_id: str) -> Optional[huggingface_hub.DatasetInfo]:\n try:\n return hf_api.dataset_info(dataset_id)\n except Exception as exc:\n warnings.warn(f\"Could not retrieve dataset info, failed with error {exc}\")\n return None\n\n\ndef get_git_hash(module) -> Optional[str]:\n if \"site-packages\" in module.__path__[0]:\n return None\n\n return subprocess.check_output(\"git rev-parse HEAD\".split(), cwd=os.path.dirname(module.__file__)).decode().strip()\n\n\ndef get_package_info() -> dict[str, Optional[str]]:\n \"\"\"Get the package versions and commit hashes of transformers, peft, datasets, bnb, and torch\"\"\"\n package_info = {\n \"transformers-version\": transformers.__version__,\n \"transformers-commit-hash\": get_git_hash(transformers),\n \"peft-version\": peft.__version__,\n \"peft-commit-hash\": get_git_hash(peft),\n \"datasets-version\": datasets.__version__,\n \"datasets-commit-hash\": get_git_hash(datasets),\n \"bitsandbytes-version\": bitsandbytes.__version__,\n \"bitsandbytes-commit-hash\": get_git_hash(bitsandbytes),\n \"torch-version\": torch.__version__,\n \"torch-commit-hash\": get_git_hash(torch),\n }\n return package_info\n\n\ndef get_system_info() -> dict[str, str]:\n device = infer_device()\n torch_accelerator_module = getattr(torch, device, torc\n# ... truncated ...","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":true} {"repo_id":"MetaMathQA","entity_id":"py:utils.TrainConfig","uri":"program://MetaMathQA/class/utils.TrainConfig#L69-L136","kind":"class","name":"TrainConfig","path":"utils.py","language":"python","start_line":69,"end_line":136,"context_start_line":49,"context_end_line":156,"code":"\ndevice = infer_device()\n\nif device not in [\"cuda\", \"xpu\"]:\n raise RuntimeError(\"CUDA or XPU is not available, currently only CUDA or XPU is supported\")\n\nACCELERATOR_MEMORY_INIT_THRESHOLD = 500 * 2**20 # 500MB\nFILE_NAME_DEFAULT_TRAIN_PARAMS = os.path.join(os.path.dirname(__file__), \"default_training_params.json\")\nFILE_NAME_TRAIN_PARAMS = \"training_params.json\" # specific params for this experiment\n# main results\nRESULT_PATH = os.path.join(os.path.dirname(__file__), \"results\")\n# testing results\nRESULT_PATH_TEST = os.path.join(os.path.dirname(__file__), \"temporary_results\")\n# cancelled results\nRESULT_PATH_CANCELLED = os.path.join(os.path.dirname(__file__), \"cancelled_results\")\nhf_api = huggingface_hub.HfApi()\nWARMUP_STEP_RATIO = 0.1\n\n\n@dataclass\nclass TrainConfig:\n \"\"\"All configuration parameters associated with training the model\n\n Args:\n model_id: The model identifier\n dtype: The data type to use for the model\n max_seq_length: The maximum sequence length\n batch_size: The batch size for training\n batch_size_eval: The batch size for eval/test, can be much higher than for training\n max_steps: The maximum number of steps to train for\n eval_steps: The number of steps between evaluations\n compile: Whether to compile the model\n query_template: The template for the query\n seed: The random seed\n grad_norm_clip: The gradient norm clipping value (set to 0 to skip)\n optimizer_type: The name of a torch optimizer (e.g. AdamW) or a PEFT method (\"lora+\", \"lora-fa\")\n optimizer_kwargs: The optimizer keyword arguments (lr etc.)\n lr_scheduler: The learning rate scheduler (currently only None or 'cosine' are supported)\n use_amp: Whether to use automatic mixed precision\n autocast_adapter_dtype: Whether to cast adapter dtype to float32, same argument as in PEFT\n generation_kwargs: Arguments passed to transformers GenerationConfig (used in evaluation)\n attn_implementation: The attention implementation to use (if any), see transformers docs\n \"\"\"\n\n model_id: str\n dtype: Literal[\"float32\", \"float16\", \"bfloat16\", \"int8\", \"int4\"]\n max_seq_length: int\n batch_size: int\n batch_size_eval: int\n max_steps: int\n eval_steps: int\n compile: bool\n query_template: str\n seed: int\n grad_norm_clip: float # set to 0 to skip\n optimizer_type: str\n optimizer_kwargs: dict[str, Any]\n lr_scheduler: Optional[Literal[\"cosine\"]]\n use_amp: bool\n autocast_adapter_dtype: bool\n generation_kwargs: dict[str, Any]\n attn_implementation: Optional[str]\n\n def __post_init__(self) -> None:\n if not isinstance(self.model_id, str):\n raise ValueError(f\"Invalid model_id: {self.model_id}\")\n if self.dtype not in [\"float32\", \"float16\", \"bfloat16\", \"int8\", \"int4\"]:\n raise ValueError(f\"Invalid dtype: {self.dtype}\")\n if self.max_seq_length < 0:\n raise ValueError(f\"Invalid max_seq_length: {self.max_seq_length}\")\n if self.batch_size <= 0:\n raise ValueError(f\"Invalid batch_size: {self.batch_size}\")\n if self.batch_size_eval <= 0:\n raise ValueError(f\"Invalid eval batch_size: {self.batch_size_eval}\")\n if self.max_steps <= 0:\n raise ValueError(f\"Invalid max_steps: {self.max_steps}\")\n if self.eval_steps <= 0:\n raise ValueError(f\"Invalid eval_steps: {self.eval_steps}\")\n if self.eval_steps > self.max_steps:\n raise ValueError(f\"Invalid eval_steps: {self.eval_steps} > max_steps: {self.max_steps}\")\n if self.grad_norm_clip < 0:\n raise ValueError(f\"Invalid grad_norm_clip: {self.grad_norm_clip}\")\n if self.optimizer_type not in [\"lora+\", \"lora-fa\"] and not hasattr(torch.optim, self.optimizer_type):\n raise ValueError(f\"Invalid optimizer_type: {self.optimizer_type}\")\n if self.lr_scheduler not in [None, \"cosine\"]:\n raise ValueError(f\"Invalid lr_scheduler: {self.lr_scheduler}, must be None or 'cosine'\")\n if \"{query}\" not in self.query_template:\n raise ValueError(\"Invalid query_template, must contain '{query}'\")\n\n\ndef validate_experiment_path(path: str) -> str:\n # the experiment path should take the form of ./experiments//\n # e.g. ./experiments/lora/rank32\n # it should contain:\n # - adapter_config.json\n # - optional: training_params.json\n if not os.path.exists(FILE_NAME_DEFAULT_TRAIN_PARAMS):\n raise FileNotFoundError(\n f\"Missing default training params file '{FILE_NAME_DEFAULT_TRAIN_PARAMS}' in the ./experiments directory\"\n )\n if not os.path.exists(path):\n raise FileNotFoundError(f\"Path {path} does not exist\")\n\n # check path structure\n path_parts = path.rstrip(os.path.sep).split(os.path.sep)\n if (len(path_parts) != 3) or (path_parts[-3] != \"experiments\"):\n raise ValueError(\n f\"Path {path} does not have the correct structure, should be ./experiments//\"","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.validate_experiment_path","uri":"program://MetaMathQA/function/utils.validate_experiment_path#L139-L160","kind":"function","name":"validate_experiment_path","path":"utils.py","language":"python","start_line":139,"end_line":160,"context_start_line":119,"context_end_line":180,"code":" if self.batch_size <= 0:\n raise ValueError(f\"Invalid batch_size: {self.batch_size}\")\n if self.batch_size_eval <= 0:\n raise ValueError(f\"Invalid eval batch_size: {self.batch_size_eval}\")\n if self.max_steps <= 0:\n raise ValueError(f\"Invalid max_steps: {self.max_steps}\")\n if self.eval_steps <= 0:\n raise ValueError(f\"Invalid eval_steps: {self.eval_steps}\")\n if self.eval_steps > self.max_steps:\n raise ValueError(f\"Invalid eval_steps: {self.eval_steps} > max_steps: {self.max_steps}\")\n if self.grad_norm_clip < 0:\n raise ValueError(f\"Invalid grad_norm_clip: {self.grad_norm_clip}\")\n if self.optimizer_type not in [\"lora+\", \"lora-fa\"] and not hasattr(torch.optim, self.optimizer_type):\n raise ValueError(f\"Invalid optimizer_type: {self.optimizer_type}\")\n if self.lr_scheduler not in [None, \"cosine\"]:\n raise ValueError(f\"Invalid lr_scheduler: {self.lr_scheduler}, must be None or 'cosine'\")\n if \"{query}\" not in self.query_template:\n raise ValueError(\"Invalid query_template, must contain '{query}'\")\n\n\ndef validate_experiment_path(path: str) -> str:\n # the experiment path should take the form of ./experiments//\n # e.g. ./experiments/lora/rank32\n # it should contain:\n # - adapter_config.json\n # - optional: training_params.json\n if not os.path.exists(FILE_NAME_DEFAULT_TRAIN_PARAMS):\n raise FileNotFoundError(\n f\"Missing default training params file '{FILE_NAME_DEFAULT_TRAIN_PARAMS}' in the ./experiments directory\"\n )\n if not os.path.exists(path):\n raise FileNotFoundError(f\"Path {path} does not exist\")\n\n # check path structure\n path_parts = path.rstrip(os.path.sep).split(os.path.sep)\n if (len(path_parts) != 3) or (path_parts[-3] != \"experiments\"):\n raise ValueError(\n f\"Path {path} does not have the correct structure, should be ./experiments//\"\n )\n\n experiment_name = os.path.join(*path_parts[-2:])\n return experiment_name\n\n\ndef get_train_config(path: str) -> TrainConfig:\n # first, load the default params, then update with experiment-specific params\n with open(FILE_NAME_DEFAULT_TRAIN_PARAMS) as f:\n default_config_kwargs = json.load(f)\n\n config_kwargs = {}\n if os.path.exists(path):\n with open(path) as f:\n config_kwargs = json.load(f)\n\n config_kwargs = {**default_config_kwargs, **config_kwargs}\n return TrainConfig(**config_kwargs)\n\n\ndef init_accelerator() -> int:\n torch_accelerator_module = getattr(torch, device, torch.cuda)\n torch.manual_seed(0)\n torch_accelerator_module.reset_peak_memory_stats()","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.get_train_config","uri":"program://MetaMathQA/function/utils.get_train_config#L163-L174","kind":"function","name":"get_train_config","path":"utils.py","language":"python","start_line":163,"end_line":174,"context_start_line":143,"context_end_line":194,"code":" # - adapter_config.json\n # - optional: training_params.json\n if not os.path.exists(FILE_NAME_DEFAULT_TRAIN_PARAMS):\n raise FileNotFoundError(\n f\"Missing default training params file '{FILE_NAME_DEFAULT_TRAIN_PARAMS}' in the ./experiments directory\"\n )\n if not os.path.exists(path):\n raise FileNotFoundError(f\"Path {path} does not exist\")\n\n # check path structure\n path_parts = path.rstrip(os.path.sep).split(os.path.sep)\n if (len(path_parts) != 3) or (path_parts[-3] != \"experiments\"):\n raise ValueError(\n f\"Path {path} does not have the correct structure, should be ./experiments//\"\n )\n\n experiment_name = os.path.join(*path_parts[-2:])\n return experiment_name\n\n\ndef get_train_config(path: str) -> TrainConfig:\n # first, load the default params, then update with experiment-specific params\n with open(FILE_NAME_DEFAULT_TRAIN_PARAMS) as f:\n default_config_kwargs = json.load(f)\n\n config_kwargs = {}\n if os.path.exists(path):\n with open(path) as f:\n config_kwargs = json.load(f)\n\n config_kwargs = {**default_config_kwargs, **config_kwargs}\n return TrainConfig(**config_kwargs)\n\n\ndef init_accelerator() -> int:\n torch_accelerator_module = getattr(torch, device, torch.cuda)\n torch.manual_seed(0)\n torch_accelerator_module.reset_peak_memory_stats()\n torch_accelerator_module.manual_seed_all(0)\n # might not be necessary, but just to be sure\n nn.Linear(1, 1).to(device)\n\n accelerator_memory_init = torch_accelerator_module.max_memory_reserved()\n if accelerator_memory_init > ACCELERATOR_MEMORY_INIT_THRESHOLD:\n raise RuntimeError(\n f\"{device} memory usage at start is too high: {accelerator_memory_init // 2**20}MB, please ensure that no other \"\n f\"processes are running on {device}.\"\n )\n\n torch_accelerator_module.reset_peak_memory_stats()\n accelerator_memory_init = torch_accelerator_module.max_memory_reserved()\n return accelerator_memory_init","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.init_accelerator","uri":"program://MetaMathQA/function/utils.init_accelerator#L177-L194","kind":"function","name":"init_accelerator","path":"utils.py","language":"python","start_line":177,"end_line":194,"context_start_line":157,"context_end_line":214,"code":" )\n\n experiment_name = os.path.join(*path_parts[-2:])\n return experiment_name\n\n\ndef get_train_config(path: str) -> TrainConfig:\n # first, load the default params, then update with experiment-specific params\n with open(FILE_NAME_DEFAULT_TRAIN_PARAMS) as f:\n default_config_kwargs = json.load(f)\n\n config_kwargs = {}\n if os.path.exists(path):\n with open(path) as f:\n config_kwargs = json.load(f)\n\n config_kwargs = {**default_config_kwargs, **config_kwargs}\n return TrainConfig(**config_kwargs)\n\n\ndef init_accelerator() -> int:\n torch_accelerator_module = getattr(torch, device, torch.cuda)\n torch.manual_seed(0)\n torch_accelerator_module.reset_peak_memory_stats()\n torch_accelerator_module.manual_seed_all(0)\n # might not be necessary, but just to be sure\n nn.Linear(1, 1).to(device)\n\n accelerator_memory_init = torch_accelerator_module.max_memory_reserved()\n if accelerator_memory_init > ACCELERATOR_MEMORY_INIT_THRESHOLD:\n raise RuntimeError(\n f\"{device} memory usage at start is too high: {accelerator_memory_init // 2**20}MB, please ensure that no other \"\n f\"processes are running on {device}.\"\n )\n\n torch_accelerator_module.reset_peak_memory_stats()\n accelerator_memory_init = torch_accelerator_module.max_memory_reserved()\n return accelerator_memory_init\n\n\ndef get_tokenizer(*, model_id: str, max_seq_length: int):\n tokenizer = AutoTokenizer.from_pretrained(model_id)\n tokenizer.model_max_length = max_seq_length\n if not tokenizer.pad_token:\n tokenizer.pad_token = tokenizer.eos_token\n return tokenizer\n\n\ndef get_base_model(\n *,\n model_id: str,\n dtype: Literal[\"float32\", \"float16\", \"bfloat16\", \"int8\", \"int4\"],\n compile: bool,\n attn_implementation: Optional[str],\n) -> nn.Module:\n kwargs: dict[str, Any] = {\n \"pretrained_model_name_or_path\": model_id,\n \"device_map\": device,","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.get_tokenizer","uri":"program://MetaMathQA/function/utils.get_tokenizer#L197-L202","kind":"function","name":"get_tokenizer","path":"utils.py","language":"python","start_line":197,"end_line":202,"context_start_line":177,"context_end_line":222,"code":"def init_accelerator() -> int:\n torch_accelerator_module = getattr(torch, device, torch.cuda)\n torch.manual_seed(0)\n torch_accelerator_module.reset_peak_memory_stats()\n torch_accelerator_module.manual_seed_all(0)\n # might not be necessary, but just to be sure\n nn.Linear(1, 1).to(device)\n\n accelerator_memory_init = torch_accelerator_module.max_memory_reserved()\n if accelerator_memory_init > ACCELERATOR_MEMORY_INIT_THRESHOLD:\n raise RuntimeError(\n f\"{device} memory usage at start is too high: {accelerator_memory_init // 2**20}MB, please ensure that no other \"\n f\"processes are running on {device}.\"\n )\n\n torch_accelerator_module.reset_peak_memory_stats()\n accelerator_memory_init = torch_accelerator_module.max_memory_reserved()\n return accelerator_memory_init\n\n\ndef get_tokenizer(*, model_id: str, max_seq_length: int):\n tokenizer = AutoTokenizer.from_pretrained(model_id)\n tokenizer.model_max_length = max_seq_length\n if not tokenizer.pad_token:\n tokenizer.pad_token = tokenizer.eos_token\n return tokenizer\n\n\ndef get_base_model(\n *,\n model_id: str,\n dtype: Literal[\"float32\", \"float16\", \"bfloat16\", \"int8\", \"int4\"],\n compile: bool,\n attn_implementation: Optional[str],\n) -> nn.Module:\n kwargs: dict[str, Any] = {\n \"pretrained_model_name_or_path\": model_id,\n \"device_map\": device,\n \"attn_implementation\": attn_implementation,\n }\n if dtype == \"int4\":\n quant_config = BitsAndBytesConfig(load_in_4bit=True)\n kwargs[\"quantization_config\"] = quant_config\n elif dtype == \"int8\":\n quant_config = BitsAndBytesConfig(load_in_8bit=True)\n kwargs[\"quantization_config\"] = quant_config","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.get_base_model","uri":"program://MetaMathQA/function/utils.get_base_model#L205-L238","kind":"function","name":"get_base_model","path":"utils.py","language":"python","start_line":205,"end_line":238,"context_start_line":185,"context_end_line":258,"code":" accelerator_memory_init = torch_accelerator_module.max_memory_reserved()\n if accelerator_memory_init > ACCELERATOR_MEMORY_INIT_THRESHOLD:\n raise RuntimeError(\n f\"{device} memory usage at start is too high: {accelerator_memory_init // 2**20}MB, please ensure that no other \"\n f\"processes are running on {device}.\"\n )\n\n torch_accelerator_module.reset_peak_memory_stats()\n accelerator_memory_init = torch_accelerator_module.max_memory_reserved()\n return accelerator_memory_init\n\n\ndef get_tokenizer(*, model_id: str, max_seq_length: int):\n tokenizer = AutoTokenizer.from_pretrained(model_id)\n tokenizer.model_max_length = max_seq_length\n if not tokenizer.pad_token:\n tokenizer.pad_token = tokenizer.eos_token\n return tokenizer\n\n\ndef get_base_model(\n *,\n model_id: str,\n dtype: Literal[\"float32\", \"float16\", \"bfloat16\", \"int8\", \"int4\"],\n compile: bool,\n attn_implementation: Optional[str],\n) -> nn.Module:\n kwargs: dict[str, Any] = {\n \"pretrained_model_name_or_path\": model_id,\n \"device_map\": device,\n \"attn_implementation\": attn_implementation,\n }\n if dtype == \"int4\":\n quant_config = BitsAndBytesConfig(load_in_4bit=True)\n kwargs[\"quantization_config\"] = quant_config\n elif dtype == \"int8\":\n quant_config = BitsAndBytesConfig(load_in_8bit=True)\n kwargs[\"quantization_config\"] = quant_config\n elif dtype == \"bfloat16\":\n kwargs[\"torch_dtype\"] = torch.bfloat16\n elif dtype == \"float16\":\n kwargs[\"torch_dtype\"] = torch.float16\n elif dtype != \"float32\":\n raise ValueError(f\"Invalid dtype: {dtype}\")\n\n model = AutoModelForCausalLM.from_pretrained(**kwargs)\n\n if dtype in [\"int8\", \"int4\"]:\n model = prepare_model_for_kbit_training(model)\n\n if compile:\n model = torch.compile(model)\n\n return model\n\n\ndef get_model(\n *,\n model_id: str,\n dtype: Literal[\"float32\", \"float16\", \"bfloat16\", \"int8\", \"int4\"],\n compile: bool,\n attn_implementation: Optional[str],\n peft_config: Optional[PeftConfig],\n autocast_adapter_dtype: bool,\n) -> nn.Module:\n base_model = get_base_model(\n model_id=model_id, dtype=dtype, compile=compile, attn_implementation=attn_implementation\n )\n if peft_config is None:\n model = base_model\n else:\n model = get_peft_model(base_model, peft_config, autocast_adapter_dtype=autocast_adapter_dtype)\n return model\n","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.get_model","uri":"program://MetaMathQA/function/utils.get_model#L241-L257","kind":"function","name":"get_model","path":"utils.py","language":"python","start_line":241,"end_line":257,"context_start_line":221,"context_end_line":277,"code":" quant_config = BitsAndBytesConfig(load_in_8bit=True)\n kwargs[\"quantization_config\"] = quant_config\n elif dtype == \"bfloat16\":\n kwargs[\"torch_dtype\"] = torch.bfloat16\n elif dtype == \"float16\":\n kwargs[\"torch_dtype\"] = torch.float16\n elif dtype != \"float32\":\n raise ValueError(f\"Invalid dtype: {dtype}\")\n\n model = AutoModelForCausalLM.from_pretrained(**kwargs)\n\n if dtype in [\"int8\", \"int4\"]:\n model = prepare_model_for_kbit_training(model)\n\n if compile:\n model = torch.compile(model)\n\n return model\n\n\ndef get_model(\n *,\n model_id: str,\n dtype: Literal[\"float32\", \"float16\", \"bfloat16\", \"int8\", \"int4\"],\n compile: bool,\n attn_implementation: Optional[str],\n peft_config: Optional[PeftConfig],\n autocast_adapter_dtype: bool,\n) -> nn.Module:\n base_model = get_base_model(\n model_id=model_id, dtype=dtype, compile=compile, attn_implementation=attn_implementation\n )\n if peft_config is None:\n model = base_model\n else:\n model = get_peft_model(base_model, peft_config, autocast_adapter_dtype=autocast_adapter_dtype)\n return model\n\n\nclass DummyScheduler:\n # if no lr scheduler is being used\n def __init__(self, lr):\n self.lr = lr\n\n def get_last_lr(self):\n return [self.lr]\n\n def step(self):\n pass\n\n\ndef get_optimizer_and_scheduler(\n model, *, optimizer_type: str, max_steps: int, lr_scheduler_arg: Optional[Literal[\"cosine\"]], **optimizer_kwargs\n) -> tuple[torch.optim.Optimizer, Any]:\n if optimizer_type == \"lora+\":\n optimizer = create_loraplus_optimizer(model, optimizer_cls=torch.optim.AdamW, **optimizer_kwargs)\n elif optimizer_type == \"lora-fa\":","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.DummyScheduler","uri":"program://MetaMathQA/class/utils.DummyScheduler#L260-L269","kind":"class","name":"DummyScheduler","path":"utils.py","language":"python","start_line":260,"end_line":269,"context_start_line":240,"context_end_line":289,"code":"\ndef get_model(\n *,\n model_id: str,\n dtype: Literal[\"float32\", \"float16\", \"bfloat16\", \"int8\", \"int4\"],\n compile: bool,\n attn_implementation: Optional[str],\n peft_config: Optional[PeftConfig],\n autocast_adapter_dtype: bool,\n) -> nn.Module:\n base_model = get_base_model(\n model_id=model_id, dtype=dtype, compile=compile, attn_implementation=attn_implementation\n )\n if peft_config is None:\n model = base_model\n else:\n model = get_peft_model(base_model, peft_config, autocast_adapter_dtype=autocast_adapter_dtype)\n return model\n\n\nclass DummyScheduler:\n # if no lr scheduler is being used\n def __init__(self, lr):\n self.lr = lr\n\n def get_last_lr(self):\n return [self.lr]\n\n def step(self):\n pass\n\n\ndef get_optimizer_and_scheduler(\n model, *, optimizer_type: str, max_steps: int, lr_scheduler_arg: Optional[Literal[\"cosine\"]], **optimizer_kwargs\n) -> tuple[torch.optim.Optimizer, Any]:\n if optimizer_type == \"lora+\":\n optimizer = create_loraplus_optimizer(model, optimizer_cls=torch.optim.AdamW, **optimizer_kwargs)\n elif optimizer_type == \"lora-fa\":\n optimizer = create_lorafa_optimizer(model, **optimizer_kwargs)\n else:\n cls = getattr(torch.optim, optimizer_type)\n optimizer = cls(model.parameters(), **optimizer_kwargs)\n\n if lr_scheduler_arg == \"cosine\":\n warmup_steps = int(WARMUP_STEP_RATIO * max_steps)\n lr_scheduler = get_cosine_schedule_with_warmup(\n optimizer, num_warmup_steps=warmup_steps, num_training_steps=max_steps\n )\n elif lr_scheduler_arg is None:\n lr_scheduler = DummyScheduler(optimizer_kwargs[\"lr\"])","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.get_optimizer_and_scheduler","uri":"program://MetaMathQA/function/utils.get_optimizer_and_scheduler#L272-L293","kind":"function","name":"get_optimizer_and_scheduler","path":"utils.py","language":"python","start_line":272,"end_line":293,"context_start_line":252,"context_end_line":313,"code":" )\n if peft_config is None:\n model = base_model\n else:\n model = get_peft_model(base_model, peft_config, autocast_adapter_dtype=autocast_adapter_dtype)\n return model\n\n\nclass DummyScheduler:\n # if no lr scheduler is being used\n def __init__(self, lr):\n self.lr = lr\n\n def get_last_lr(self):\n return [self.lr]\n\n def step(self):\n pass\n\n\ndef get_optimizer_and_scheduler(\n model, *, optimizer_type: str, max_steps: int, lr_scheduler_arg: Optional[Literal[\"cosine\"]], **optimizer_kwargs\n) -> tuple[torch.optim.Optimizer, Any]:\n if optimizer_type == \"lora+\":\n optimizer = create_loraplus_optimizer(model, optimizer_cls=torch.optim.AdamW, **optimizer_kwargs)\n elif optimizer_type == \"lora-fa\":\n optimizer = create_lorafa_optimizer(model, **optimizer_kwargs)\n else:\n cls = getattr(torch.optim, optimizer_type)\n optimizer = cls(model.parameters(), **optimizer_kwargs)\n\n if lr_scheduler_arg == \"cosine\":\n warmup_steps = int(WARMUP_STEP_RATIO * max_steps)\n lr_scheduler = get_cosine_schedule_with_warmup(\n optimizer, num_warmup_steps=warmup_steps, num_training_steps=max_steps\n )\n elif lr_scheduler_arg is None:\n lr_scheduler = DummyScheduler(optimizer_kwargs[\"lr\"])\n else:\n raise ValueError(f\"Invalid lr_scheduler argument: {lr_scheduler_arg}\")\n\n return optimizer, lr_scheduler\n\n\nclass BucketIterator:\n \"\"\"\n Iterator that yields batches of data from a torch Dataset, grouped in buckets by sequence length\n\n The iterator will yield batches of size `batch_size`, where the samples in each batch are sorted by sequence length.\n This is done to minimize the amount of padding required for each batch. To avoid sorting the entire dataset and thus\n introducing a bias, the dataset is first split into buckets of size `batch_size * bucket_factor`.\n\n Args:\n ds: The torch Dataset to iterate over\n batch_size: The batch size\n bucket_factor: The factor by which to multiply the batch size to determine the bucket size\n delete_cols: The columns to delete from the dataset before yielding a batch\n \"\"\"\n\n def __init__(self, ds, *, batch_size: int, bucket_factor: int, delete_cols: list[str]) -> None:\n self.ds = ds\n self.batch_size = batch_size","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.BucketIterator","uri":"program://MetaMathQA/class/utils.BucketIterator#L296-L339","kind":"class","name":"BucketIterator","path":"utils.py","language":"python","start_line":296,"end_line":339,"context_start_line":276,"context_end_line":359,"code":" optimizer = create_loraplus_optimizer(model, optimizer_cls=torch.optim.AdamW, **optimizer_kwargs)\n elif optimizer_type == \"lora-fa\":\n optimizer = create_lorafa_optimizer(model, **optimizer_kwargs)\n else:\n cls = getattr(torch.optim, optimizer_type)\n optimizer = cls(model.parameters(), **optimizer_kwargs)\n\n if lr_scheduler_arg == \"cosine\":\n warmup_steps = int(WARMUP_STEP_RATIO * max_steps)\n lr_scheduler = get_cosine_schedule_with_warmup(\n optimizer, num_warmup_steps=warmup_steps, num_training_steps=max_steps\n )\n elif lr_scheduler_arg is None:\n lr_scheduler = DummyScheduler(optimizer_kwargs[\"lr\"])\n else:\n raise ValueError(f\"Invalid lr_scheduler argument: {lr_scheduler_arg}\")\n\n return optimizer, lr_scheduler\n\n\nclass BucketIterator:\n \"\"\"\n Iterator that yields batches of data from a torch Dataset, grouped in buckets by sequence length\n\n The iterator will yield batches of size `batch_size`, where the samples in each batch are sorted by sequence length.\n This is done to minimize the amount of padding required for each batch. To avoid sorting the entire dataset and thus\n introducing a bias, the dataset is first split into buckets of size `batch_size * bucket_factor`.\n\n Args:\n ds: The torch Dataset to iterate over\n batch_size: The batch size\n bucket_factor: The factor by which to multiply the batch size to determine the bucket size\n delete_cols: The columns to delete from the dataset before yielding a batch\n \"\"\"\n\n def __init__(self, ds, *, batch_size: int, bucket_factor: int, delete_cols: list[str]) -> None:\n self.ds = ds\n self.batch_size = batch_size\n self.bucket_factor = bucket_factor\n self.delete_cols = set(delete_cols)\n\n assert self.bucket_factor > 0, \"bucket_factor must be greater than 0\"\n\n def _batch_iterator(self, bucket):\n tokens_per_sample_bucket = torch.tensor([len(i) for i in bucket[\"input_ids\"]])\n # sort long to short instead to encounter possible OOM errors as early as possible\n sorted = torch.argsort(tokens_per_sample_bucket, descending=True)\n cls = type(bucket) # conserve the type returned by the ds\n bucket = {k: [v[i] for i in sorted] for k, v in bucket.items() if k not in self.delete_cols}\n num_samples = len(bucket[\"input_ids\"])\n for j in range(0, num_samples, self.batch_size):\n batch = {k: v[j : j + self.batch_size] for k, v in bucket.items()}\n yield cls(batch)\n\n def __iter__(self):\n bucket_size = self.batch_size * self.bucket_factor\n for i in range(0, len(self.ds), bucket_size):\n bucket = self.ds[i : i + bucket_size]\n yield from self._batch_iterator(bucket)\n\n # if there is a remainder, we yield the last batch\n if len(self.ds) % bucket_size != 0:\n bucket = self.ds[-(len(self.ds) % bucket_size) :]\n yield from self._batch_iterator(bucket)\n\n\ndef get_file_size(\n model: nn.Module, *, peft_config: Optional[PeftConfig], clean: bool, print_fn: Callable[..., None]\n) -> int:\n file_size = 99999999 # set a default dummy value\n if peft_config is not None:\n try:\n with tempfile.TemporaryDirectory(ignore_cleanup_errors=True, delete=clean) as tmp_dir:\n model.save_pretrained(tmp_dir)\n stat = os.stat(os.path.join(tmp_dir, SAFETENSORS_WEIGHTS_NAME))\n file_size = stat.st_size\n if not clean:\n print_fn(f\"Saved PEFT checkpoint to {tmp_dir}\")\n except Exception as exc:\n print(f\"Failed to save PEFT checkpoint due to the following error: {exc}\")\n else:\n print_fn(\"Not saving the fully fine-tuned model because it's too big, estimating the size instead\")\n try:\n num_params = model.num_parameters()","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.get_file_size","uri":"program://MetaMathQA/function/utils.get_file_size#L342-L364","kind":"function","name":"get_file_size","path":"utils.py","language":"python","start_line":342,"end_line":364,"context_start_line":322,"context_end_line":384,"code":" sorted = torch.argsort(tokens_per_sample_bucket, descending=True)\n cls = type(bucket) # conserve the type returned by the ds\n bucket = {k: [v[i] for i in sorted] for k, v in bucket.items() if k not in self.delete_cols}\n num_samples = len(bucket[\"input_ids\"])\n for j in range(0, num_samples, self.batch_size):\n batch = {k: v[j : j + self.batch_size] for k, v in bucket.items()}\n yield cls(batch)\n\n def __iter__(self):\n bucket_size = self.batch_size * self.bucket_factor\n for i in range(0, len(self.ds), bucket_size):\n bucket = self.ds[i : i + bucket_size]\n yield from self._batch_iterator(bucket)\n\n # if there is a remainder, we yield the last batch\n if len(self.ds) % bucket_size != 0:\n bucket = self.ds[-(len(self.ds) % bucket_size) :]\n yield from self._batch_iterator(bucket)\n\n\ndef get_file_size(\n model: nn.Module, *, peft_config: Optional[PeftConfig], clean: bool, print_fn: Callable[..., None]\n) -> int:\n file_size = 99999999 # set a default dummy value\n if peft_config is not None:\n try:\n with tempfile.TemporaryDirectory(ignore_cleanup_errors=True, delete=clean) as tmp_dir:\n model.save_pretrained(tmp_dir)\n stat = os.stat(os.path.join(tmp_dir, SAFETENSORS_WEIGHTS_NAME))\n file_size = stat.st_size\n if not clean:\n print_fn(f\"Saved PEFT checkpoint to {tmp_dir}\")\n except Exception as exc:\n print(f\"Failed to save PEFT checkpoint due to the following error: {exc}\")\n else:\n print_fn(\"Not saving the fully fine-tuned model because it's too big, estimating the size instead\")\n try:\n num_params = model.num_parameters()\n dtype_size = next(model.parameters()).element_size()\n file_size = num_params * dtype_size\n except Exception as exc:\n print(f\"Failed to determine file size for fully finetuned model because of: {exc}\")\n return file_size\n\n\n##################\n# ANSWER PARSING #\n##################\n\n\ndef parse_answer(text: str) -> Optional[str]:\n \"\"\"\n A label/prediction can look like this:\n\n Question: If the magnitude of vector v is equal to 4, what is the dot product of vector v with itself?. Think step\n by step\n Answer: The dot product of a vector with itself is equal to the square of its magnitude. So, the dot product of\n vector v with itself is equal to $4^2 = \\boxed{16}$.The answer is: 16\n\n We want to extract '16' from this string.\n\n \"\"\"\n # This implementation is based on sampling meta-llama/Llama-3.1-8B-Instruct. It may not work for other models.","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.parse_answer","uri":"program://MetaMathQA/function/utils.parse_answer#L372-L407","kind":"function","name":"parse_answer","path":"utils.py","language":"python","start_line":372,"end_line":407,"context_start_line":352,"context_end_line":427,"code":" if not clean:\n print_fn(f\"Saved PEFT checkpoint to {tmp_dir}\")\n except Exception as exc:\n print(f\"Failed to save PEFT checkpoint due to the following error: {exc}\")\n else:\n print_fn(\"Not saving the fully fine-tuned model because it's too big, estimating the size instead\")\n try:\n num_params = model.num_parameters()\n dtype_size = next(model.parameters()).element_size()\n file_size = num_params * dtype_size\n except Exception as exc:\n print(f\"Failed to determine file size for fully finetuned model because of: {exc}\")\n return file_size\n\n\n##################\n# ANSWER PARSING #\n##################\n\n\ndef parse_answer(text: str) -> Optional[str]:\n \"\"\"\n A label/prediction can look like this:\n\n Question: If the magnitude of vector v is equal to 4, what is the dot product of vector v with itself?. Think step\n by step\n Answer: The dot product of a vector with itself is equal to the square of its magnitude. So, the dot product of\n vector v with itself is equal to $4^2 = \\boxed{16}$.The answer is: 16\n\n We want to extract '16' from this string.\n\n \"\"\"\n # This implementation is based on sampling meta-llama/Llama-3.1-8B-Instruct. It may not work for other models.\n candidate_delimiters = [\n # MetaMath:\n \"The answer is: \",\n \"The answer is \",\n \"The final answer is: \",\n \"The final answer is \",\n # GSM8K:\n \"#### \",\n ]\n text = text.strip()\n text = text.rstrip(\".!?\")\n for delimiter in candidate_delimiters:\n if delimiter in text:\n break\n else: # no match\n return None\n\n text = text.rpartition(delimiter)[-1].strip()\n # if a new paragraph follows after the final answer, we want to remove it\n text = text.split(\"\\n\", 1)[0]\n # note: we can just remove % here since the GSM8K dataset just omits it, i.e. 50% -> 50, no need to divide by 100\n text = text.strip(\" .!?$%\")\n return text\n\n\ndef convert_to_decimal(s: Optional[str]) -> Optional[Decimal]:\n \"\"\"\n Converts a string representing a number to a Decimal.\n\n The string may be:\n - A simple number (e.g., \"13\", \"65.33\")\n - A fraction (e.g., \"20/14\")\n \"\"\"\n if s is None:\n return None\n\n try:\n s = s.strip()\n # Check if the string represents a fraction.\n if \"/\" in s:\n parts = s.split(\"/\")\n if len(parts) != 2:\n return None","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.convert_to_decimal","uri":"program://MetaMathQA/function/utils.convert_to_decimal#L410-L438","kind":"function","name":"convert_to_decimal","path":"utils.py","language":"python","start_line":410,"end_line":438,"context_start_line":390,"context_end_line":458,"code":" \"The final answer is \",\n # GSM8K:\n \"#### \",\n ]\n text = text.strip()\n text = text.rstrip(\".!?\")\n for delimiter in candidate_delimiters:\n if delimiter in text:\n break\n else: # no match\n return None\n\n text = text.rpartition(delimiter)[-1].strip()\n # if a new paragraph follows after the final answer, we want to remove it\n text = text.split(\"\\n\", 1)[0]\n # note: we can just remove % here since the GSM8K dataset just omits it, i.e. 50% -> 50, no need to divide by 100\n text = text.strip(\" .!?$%\")\n return text\n\n\ndef convert_to_decimal(s: Optional[str]) -> Optional[Decimal]:\n \"\"\"\n Converts a string representing a number to a Decimal.\n\n The string may be:\n - A simple number (e.g., \"13\", \"65.33\")\n - A fraction (e.g., \"20/14\")\n \"\"\"\n if s is None:\n return None\n\n try:\n s = s.strip()\n # Check if the string represents a fraction.\n if \"/\" in s:\n parts = s.split(\"/\")\n if len(parts) != 2:\n return None\n numerator = Decimal(parts[0].strip())\n denominator = Decimal(parts[1].strip())\n if denominator == 0:\n return None\n value = numerator / denominator\n else:\n # Parse as a regular decimal or integer string.\n value = Decimal(s)\n return value\n except (DivisionByZero, InvalidOperation, ValueError):\n return None\n\n\ndef get_accuracy(*, predictions: list[str], responses: list[str]) -> float:\n if len(predictions) != len(responses):\n raise ValueError(f\"Prediction length mismatch: {len(predictions)} != {len(responses)}\")\n\n y_true: list[str | float | None] = []\n y_pred: list[str | float | None] = []\n\n for prediction, response in zip(predictions, responses):\n parsed_prediction = parse_answer(prediction)\n parsed_response = parse_answer(response)\n if parsed_response is None:\n raise ValueError(f\"Error encountered while trying to parse response: {response}\")\n\n decimal_prediction = convert_to_decimal(parsed_prediction)\n decimal_answer = convert_to_decimal(parsed_response)\n if decimal_prediction is not None:\n y_pred.append(float(decimal_prediction))\n elif parsed_prediction is not None:","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.get_accuracy","uri":"program://MetaMathQA/function/utils.get_accuracy#L441-L480","kind":"function","name":"get_accuracy","path":"utils.py","language":"python","start_line":441,"end_line":480,"context_start_line":421,"context_end_line":500,"code":" try:\n s = s.strip()\n # Check if the string represents a fraction.\n if \"/\" in s:\n parts = s.split(\"/\")\n if len(parts) != 2:\n return None\n numerator = Decimal(parts[0].strip())\n denominator = Decimal(parts[1].strip())\n if denominator == 0:\n return None\n value = numerator / denominator\n else:\n # Parse as a regular decimal or integer string.\n value = Decimal(s)\n return value\n except (DivisionByZero, InvalidOperation, ValueError):\n return None\n\n\ndef get_accuracy(*, predictions: list[str], responses: list[str]) -> float:\n if len(predictions) != len(responses):\n raise ValueError(f\"Prediction length mismatch: {len(predictions)} != {len(responses)}\")\n\n y_true: list[str | float | None] = []\n y_pred: list[str | float | None] = []\n\n for prediction, response in zip(predictions, responses):\n parsed_prediction = parse_answer(prediction)\n parsed_response = parse_answer(response)\n if parsed_response is None:\n raise ValueError(f\"Error encountered while trying to parse response: {response}\")\n\n decimal_prediction = convert_to_decimal(parsed_prediction)\n decimal_answer = convert_to_decimal(parsed_response)\n if decimal_prediction is not None:\n y_pred.append(float(decimal_prediction))\n elif parsed_prediction is not None:\n y_pred.append(parsed_prediction)\n else:\n y_pred.append(None)\n\n # we convert decimals to float so that stuff like this works:\n # float(convert_to_decimal('20/35')) == float(convert_to_decimal('0.5714285714285714'))\n if decimal_answer is not None:\n y_true.append(float(decimal_answer))\n elif parsed_prediction is not None:\n y_true.append(parsed_response)\n else:\n y_true.append(None)\n\n correct: list[bool] = []\n for true, pred in zip(y_true, y_pred):\n if (true is not None) and (pred is not None):\n correct.append(true == pred)\n else:\n correct.append(False)\n\n accuracy = sum(correct) / len(correct)\n return accuracy\n\n\n###########\n# LOGGING #\n###########\n\n\ndef get_base_model_info(model_id: str) -> Optional[huggingface_hub.ModelInfo]:\n try:\n return hf_api.model_info(model_id)\n except Exception as exc:\n warnings.warn(f\"Could not retrieve model info, failed with error {exc}\")\n return None\n\n\ndef get_dataset_info(dataset_id: str) -> Optional[huggingface_hub.DatasetInfo]:\n try:\n return hf_api.dataset_info(dataset_id)\n except Exception as exc:\n warnings.warn(f\"Could not retrieve dataset info, failed with error {exc}\")","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.get_base_model_info","uri":"program://MetaMathQA/function/utils.get_base_model_info#L488-L493","kind":"function","name":"get_base_model_info","path":"utils.py","language":"python","start_line":488,"end_line":493,"context_start_line":468,"context_end_line":513,"code":" y_true.append(parsed_response)\n else:\n y_true.append(None)\n\n correct: list[bool] = []\n for true, pred in zip(y_true, y_pred):\n if (true is not None) and (pred is not None):\n correct.append(true == pred)\n else:\n correct.append(False)\n\n accuracy = sum(correct) / len(correct)\n return accuracy\n\n\n###########\n# LOGGING #\n###########\n\n\ndef get_base_model_info(model_id: str) -> Optional[huggingface_hub.ModelInfo]:\n try:\n return hf_api.model_info(model_id)\n except Exception as exc:\n warnings.warn(f\"Could not retrieve model info, failed with error {exc}\")\n return None\n\n\ndef get_dataset_info(dataset_id: str) -> Optional[huggingface_hub.DatasetInfo]:\n try:\n return hf_api.dataset_info(dataset_id)\n except Exception as exc:\n warnings.warn(f\"Could not retrieve dataset info, failed with error {exc}\")\n return None\n\n\ndef get_git_hash(module) -> Optional[str]:\n if \"site-packages\" in module.__path__[0]:\n return None\n\n return subprocess.check_output(\"git rev-parse HEAD\".split(), cwd=os.path.dirname(module.__file__)).decode().strip()\n\n\ndef get_package_info() -> dict[str, Optional[str]]:\n \"\"\"Get the package versions and commit hashes of transformers, peft, datasets, bnb, and torch\"\"\"\n package_info = {","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.get_dataset_info","uri":"program://MetaMathQA/function/utils.get_dataset_info#L496-L501","kind":"function","name":"get_dataset_info","path":"utils.py","language":"python","start_line":496,"end_line":501,"context_start_line":476,"context_end_line":521,"code":" else:\n correct.append(False)\n\n accuracy = sum(correct) / len(correct)\n return accuracy\n\n\n###########\n# LOGGING #\n###########\n\n\ndef get_base_model_info(model_id: str) -> Optional[huggingface_hub.ModelInfo]:\n try:\n return hf_api.model_info(model_id)\n except Exception as exc:\n warnings.warn(f\"Could not retrieve model info, failed with error {exc}\")\n return None\n\n\ndef get_dataset_info(dataset_id: str) -> Optional[huggingface_hub.DatasetInfo]:\n try:\n return hf_api.dataset_info(dataset_id)\n except Exception as exc:\n warnings.warn(f\"Could not retrieve dataset info, failed with error {exc}\")\n return None\n\n\ndef get_git_hash(module) -> Optional[str]:\n if \"site-packages\" in module.__path__[0]:\n return None\n\n return subprocess.check_output(\"git rev-parse HEAD\".split(), cwd=os.path.dirname(module.__file__)).decode().strip()\n\n\ndef get_package_info() -> dict[str, Optional[str]]:\n \"\"\"Get the package versions and commit hashes of transformers, peft, datasets, bnb, and torch\"\"\"\n package_info = {\n \"transformers-version\": transformers.__version__,\n \"transformers-commit-hash\": get_git_hash(transformers),\n \"peft-version\": peft.__version__,\n \"peft-commit-hash\": get_git_hash(peft),\n \"datasets-version\": datasets.__version__,\n \"datasets-commit-hash\": get_git_hash(datasets),\n \"bitsandbytes-version\": bitsandbytes.__version__,\n \"bitsandbytes-commit-hash\": get_git_hash(bitsandbytes),","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.get_git_hash","uri":"program://MetaMathQA/function/utils.get_git_hash#L504-L508","kind":"function","name":"get_git_hash","path":"utils.py","language":"python","start_line":504,"end_line":508,"context_start_line":484,"context_end_line":528,"code":"# LOGGING #\n###########\n\n\ndef get_base_model_info(model_id: str) -> Optional[huggingface_hub.ModelInfo]:\n try:\n return hf_api.model_info(model_id)\n except Exception as exc:\n warnings.warn(f\"Could not retrieve model info, failed with error {exc}\")\n return None\n\n\ndef get_dataset_info(dataset_id: str) -> Optional[huggingface_hub.DatasetInfo]:\n try:\n return hf_api.dataset_info(dataset_id)\n except Exception as exc:\n warnings.warn(f\"Could not retrieve dataset info, failed with error {exc}\")\n return None\n\n\ndef get_git_hash(module) -> Optional[str]:\n if \"site-packages\" in module.__path__[0]:\n return None\n\n return subprocess.check_output(\"git rev-parse HEAD\".split(), cwd=os.path.dirname(module.__file__)).decode().strip()\n\n\ndef get_package_info() -> dict[str, Optional[str]]:\n \"\"\"Get the package versions and commit hashes of transformers, peft, datasets, bnb, and torch\"\"\"\n package_info = {\n \"transformers-version\": transformers.__version__,\n \"transformers-commit-hash\": get_git_hash(transformers),\n \"peft-version\": peft.__version__,\n \"peft-commit-hash\": get_git_hash(peft),\n \"datasets-version\": datasets.__version__,\n \"datasets-commit-hash\": get_git_hash(datasets),\n \"bitsandbytes-version\": bitsandbytes.__version__,\n \"bitsandbytes-commit-hash\": get_git_hash(bitsandbytes),\n \"torch-version\": torch.__version__,\n \"torch-commit-hash\": get_git_hash(torch),\n }\n return package_info\n\n\ndef get_system_info() -> dict[str, str]:","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.get_package_info","uri":"program://MetaMathQA/function/utils.get_package_info#L511-L525","kind":"function","name":"get_package_info","path":"utils.py","language":"python","start_line":511,"end_line":525,"context_start_line":491,"context_end_line":545,"code":" except Exception as exc:\n warnings.warn(f\"Could not retrieve model info, failed with error {exc}\")\n return None\n\n\ndef get_dataset_info(dataset_id: str) -> Optional[huggingface_hub.DatasetInfo]:\n try:\n return hf_api.dataset_info(dataset_id)\n except Exception as exc:\n warnings.warn(f\"Could not retrieve dataset info, failed with error {exc}\")\n return None\n\n\ndef get_git_hash(module) -> Optional[str]:\n if \"site-packages\" in module.__path__[0]:\n return None\n\n return subprocess.check_output(\"git rev-parse HEAD\".split(), cwd=os.path.dirname(module.__file__)).decode().strip()\n\n\ndef get_package_info() -> dict[str, Optional[str]]:\n \"\"\"Get the package versions and commit hashes of transformers, peft, datasets, bnb, and torch\"\"\"\n package_info = {\n \"transformers-version\": transformers.__version__,\n \"transformers-commit-hash\": get_git_hash(transformers),\n \"peft-version\": peft.__version__,\n \"peft-commit-hash\": get_git_hash(peft),\n \"datasets-version\": datasets.__version__,\n \"datasets-commit-hash\": get_git_hash(datasets),\n \"bitsandbytes-version\": bitsandbytes.__version__,\n \"bitsandbytes-commit-hash\": get_git_hash(bitsandbytes),\n \"torch-version\": torch.__version__,\n \"torch-commit-hash\": get_git_hash(torch),\n }\n return package_info\n\n\ndef get_system_info() -> dict[str, str]:\n device = infer_device()\n torch_accelerator_module = getattr(torch, device, torch.cuda)\n system_info = {\n \"system\": platform.system(),\n \"release\": platform.release(),\n \"version\": platform.version(),\n \"machine\": platform.machine(),\n \"processor\": platform.processor(),\n \"accelerator\": torch_accelerator_module.get_device_name(0),\n }\n return system_info\n\n\n@dataclass\nclass MetaInfo:\n package_info: dict[str, Optional[str]]\n system_info: dict[str, str]","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.get_system_info","uri":"program://MetaMathQA/function/utils.get_system_info#L528-L539","kind":"function","name":"get_system_info","path":"utils.py","language":"python","start_line":528,"end_line":539,"context_start_line":508,"context_end_line":559,"code":" return subprocess.check_output(\"git rev-parse HEAD\".split(), cwd=os.path.dirname(module.__file__)).decode().strip()\n\n\ndef get_package_info() -> dict[str, Optional[str]]:\n \"\"\"Get the package versions and commit hashes of transformers, peft, datasets, bnb, and torch\"\"\"\n package_info = {\n \"transformers-version\": transformers.__version__,\n \"transformers-commit-hash\": get_git_hash(transformers),\n \"peft-version\": peft.__version__,\n \"peft-commit-hash\": get_git_hash(peft),\n \"datasets-version\": datasets.__version__,\n \"datasets-commit-hash\": get_git_hash(datasets),\n \"bitsandbytes-version\": bitsandbytes.__version__,\n \"bitsandbytes-commit-hash\": get_git_hash(bitsandbytes),\n \"torch-version\": torch.__version__,\n \"torch-commit-hash\": get_git_hash(torch),\n }\n return package_info\n\n\ndef get_system_info() -> dict[str, str]:\n device = infer_device()\n torch_accelerator_module = getattr(torch, device, torch.cuda)\n system_info = {\n \"system\": platform.system(),\n \"release\": platform.release(),\n \"version\": platform.version(),\n \"machine\": platform.machine(),\n \"processor\": platform.processor(),\n \"accelerator\": torch_accelerator_module.get_device_name(0),\n }\n return system_info\n\n\n@dataclass\nclass MetaInfo:\n package_info: dict[str, Optional[str]]\n system_info: dict[str, str]\n pytorch_info: str\n\n\ndef get_meta_info() -> MetaInfo:\n meta_info = MetaInfo(\n package_info=get_package_info(),\n system_info=get_system_info(),\n pytorch_info=torch.__config__.show(),\n )\n return meta_info\n\n\ndef get_peft_branch() -> str:\n return (","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.MetaInfo","uri":"program://MetaMathQA/class/utils.MetaInfo#L543-L546","kind":"class","name":"MetaInfo","path":"utils.py","language":"python","start_line":543,"end_line":546,"context_start_line":523,"context_end_line":566,"code":" \"torch-commit-hash\": get_git_hash(torch),\n }\n return package_info\n\n\ndef get_system_info() -> dict[str, str]:\n device = infer_device()\n torch_accelerator_module = getattr(torch, device, torch.cuda)\n system_info = {\n \"system\": platform.system(),\n \"release\": platform.release(),\n \"version\": platform.version(),\n \"machine\": platform.machine(),\n \"processor\": platform.processor(),\n \"accelerator\": torch_accelerator_module.get_device_name(0),\n }\n return system_info\n\n\n@dataclass\nclass MetaInfo:\n package_info: dict[str, Optional[str]]\n system_info: dict[str, str]\n pytorch_info: str\n\n\ndef get_meta_info() -> MetaInfo:\n meta_info = MetaInfo(\n package_info=get_package_info(),\n system_info=get_system_info(),\n pytorch_info=torch.__config__.show(),\n )\n return meta_info\n\n\ndef get_peft_branch() -> str:\n return (\n subprocess.check_output(\"git rev-parse --abbrev-ref HEAD\".split(), cwd=os.path.dirname(peft.__file__))\n .decode()\n .strip()\n )\n\n\nclass TrainStatus(enum.Enum):","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.get_meta_info","uri":"program://MetaMathQA/function/utils.get_meta_info#L549-L555","kind":"function","name":"get_meta_info","path":"utils.py","language":"python","start_line":549,"end_line":555,"context_start_line":529,"context_end_line":575,"code":" device = infer_device()\n torch_accelerator_module = getattr(torch, device, torch.cuda)\n system_info = {\n \"system\": platform.system(),\n \"release\": platform.release(),\n \"version\": platform.version(),\n \"machine\": platform.machine(),\n \"processor\": platform.processor(),\n \"accelerator\": torch_accelerator_module.get_device_name(0),\n }\n return system_info\n\n\n@dataclass\nclass MetaInfo:\n package_info: dict[str, Optional[str]]\n system_info: dict[str, str]\n pytorch_info: str\n\n\ndef get_meta_info() -> MetaInfo:\n meta_info = MetaInfo(\n package_info=get_package_info(),\n system_info=get_system_info(),\n pytorch_info=torch.__config__.show(),\n )\n return meta_info\n\n\ndef get_peft_branch() -> str:\n return (\n subprocess.check_output(\"git rev-parse --abbrev-ref HEAD\".split(), cwd=os.path.dirname(peft.__file__))\n .decode()\n .strip()\n )\n\n\nclass TrainStatus(enum.Enum):\n FAILED = \"failed\"\n SUCCESS = \"success\"\n CANCELED = \"canceled\"\n\n\n@dataclass\nclass TrainResult:\n status: TrainStatus\n train_time: float","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.get_peft_branch","uri":"program://MetaMathQA/function/utils.get_peft_branch#L558-L563","kind":"function","name":"get_peft_branch","path":"utils.py","language":"python","start_line":558,"end_line":563,"context_start_line":538,"context_end_line":583,"code":" }\n return system_info\n\n\n@dataclass\nclass MetaInfo:\n package_info: dict[str, Optional[str]]\n system_info: dict[str, str]\n pytorch_info: str\n\n\ndef get_meta_info() -> MetaInfo:\n meta_info = MetaInfo(\n package_info=get_package_info(),\n system_info=get_system_info(),\n pytorch_info=torch.__config__.show(),\n )\n return meta_info\n\n\ndef get_peft_branch() -> str:\n return (\n subprocess.check_output(\"git rev-parse --abbrev-ref HEAD\".split(), cwd=os.path.dirname(peft.__file__))\n .decode()\n .strip()\n )\n\n\nclass TrainStatus(enum.Enum):\n FAILED = \"failed\"\n SUCCESS = \"success\"\n CANCELED = \"canceled\"\n\n\n@dataclass\nclass TrainResult:\n status: TrainStatus\n train_time: float\n accelerator_memory_reserved_log: list[int]\n losses: list[float]\n metrics: list[Any] # TODO\n error_msg: str\n num_trainable_params: int\n num_total_params: int\n\n","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.TrainStatus","uri":"program://MetaMathQA/class/utils.TrainStatus#L566-L569","kind":"class","name":"TrainStatus","path":"utils.py","language":"python","start_line":566,"end_line":569,"context_start_line":546,"context_end_line":589,"code":" pytorch_info: str\n\n\ndef get_meta_info() -> MetaInfo:\n meta_info = MetaInfo(\n package_info=get_package_info(),\n system_info=get_system_info(),\n pytorch_info=torch.__config__.show(),\n )\n return meta_info\n\n\ndef get_peft_branch() -> str:\n return (\n subprocess.check_output(\"git rev-parse --abbrev-ref HEAD\".split(), cwd=os.path.dirname(peft.__file__))\n .decode()\n .strip()\n )\n\n\nclass TrainStatus(enum.Enum):\n FAILED = \"failed\"\n SUCCESS = \"success\"\n CANCELED = \"canceled\"\n\n\n@dataclass\nclass TrainResult:\n status: TrainStatus\n train_time: float\n accelerator_memory_reserved_log: list[int]\n losses: list[float]\n metrics: list[Any] # TODO\n error_msg: str\n num_trainable_params: int\n num_total_params: int\n\n\ndef log_to_console(log_data: dict[str, Any], print_fn: Callable[..., None]) -> None:\n accelerator_memory_max = log_data[\"train_info\"][\"accelerator_memory_max\"]\n accelerator_memory_avg = log_data[\"train_info\"][\"accelerator_memory_reserved_avg\"]\n accelerator_memory_reserved_99th = log_data[\"train_info\"][\"accelerator_memory_reserved_99th\"]\n time_train = log_data[\"train_info\"][\"train_time\"]\n time_total = log_data[\"run_info\"][\"total_time\"]","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.TrainResult","uri":"program://MetaMathQA/class/utils.TrainResult#L573-L581","kind":"class","name":"TrainResult","path":"utils.py","language":"python","start_line":573,"end_line":581,"context_start_line":553,"context_end_line":601,"code":" pytorch_info=torch.__config__.show(),\n )\n return meta_info\n\n\ndef get_peft_branch() -> str:\n return (\n subprocess.check_output(\"git rev-parse --abbrev-ref HEAD\".split(), cwd=os.path.dirname(peft.__file__))\n .decode()\n .strip()\n )\n\n\nclass TrainStatus(enum.Enum):\n FAILED = \"failed\"\n SUCCESS = \"success\"\n CANCELED = \"canceled\"\n\n\n@dataclass\nclass TrainResult:\n status: TrainStatus\n train_time: float\n accelerator_memory_reserved_log: list[int]\n losses: list[float]\n metrics: list[Any] # TODO\n error_msg: str\n num_trainable_params: int\n num_total_params: int\n\n\ndef log_to_console(log_data: dict[str, Any], print_fn: Callable[..., None]) -> None:\n accelerator_memory_max = log_data[\"train_info\"][\"accelerator_memory_max\"]\n accelerator_memory_avg = log_data[\"train_info\"][\"accelerator_memory_reserved_avg\"]\n accelerator_memory_reserved_99th = log_data[\"train_info\"][\"accelerator_memory_reserved_99th\"]\n time_train = log_data[\"train_info\"][\"train_time\"]\n time_total = log_data[\"run_info\"][\"total_time\"]\n file_size = log_data[\"train_info\"][\"file_size\"]\n\n print_fn(f\"accelerator memory max: {accelerator_memory_max // 2**20}MB\")\n print_fn(f\"accelerator memory reserved avg: {accelerator_memory_avg // 2**20}MB\")\n print_fn(f\"accelerator memory reserved 99th percentile: {accelerator_memory_reserved_99th // 2**20}MB\")\n print_fn(f\"train time: {time_train}s\")\n print_fn(f\"total time: {time_total:.2f}s\")\n print_fn(f\"file size of checkpoint: {file_size / 2**20:.1f}MB\")\n\n\ndef log_to_file(\n *, log_data: dict, save_dir: str, experiment_name: str, timestamp: str, print_fn: Callable[..., None]","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.log_to_console","uri":"program://MetaMathQA/function/utils.log_to_console#L584-L597","kind":"function","name":"log_to_console","path":"utils.py","language":"python","start_line":584,"end_line":597,"context_start_line":564,"context_end_line":617,"code":"\n\nclass TrainStatus(enum.Enum):\n FAILED = \"failed\"\n SUCCESS = \"success\"\n CANCELED = \"canceled\"\n\n\n@dataclass\nclass TrainResult:\n status: TrainStatus\n train_time: float\n accelerator_memory_reserved_log: list[int]\n losses: list[float]\n metrics: list[Any] # TODO\n error_msg: str\n num_trainable_params: int\n num_total_params: int\n\n\ndef log_to_console(log_data: dict[str, Any], print_fn: Callable[..., None]) -> None:\n accelerator_memory_max = log_data[\"train_info\"][\"accelerator_memory_max\"]\n accelerator_memory_avg = log_data[\"train_info\"][\"accelerator_memory_reserved_avg\"]\n accelerator_memory_reserved_99th = log_data[\"train_info\"][\"accelerator_memory_reserved_99th\"]\n time_train = log_data[\"train_info\"][\"train_time\"]\n time_total = log_data[\"run_info\"][\"total_time\"]\n file_size = log_data[\"train_info\"][\"file_size\"]\n\n print_fn(f\"accelerator memory max: {accelerator_memory_max // 2**20}MB\")\n print_fn(f\"accelerator memory reserved avg: {accelerator_memory_avg // 2**20}MB\")\n print_fn(f\"accelerator memory reserved 99th percentile: {accelerator_memory_reserved_99th // 2**20}MB\")\n print_fn(f\"train time: {time_train}s\")\n print_fn(f\"total time: {time_total:.2f}s\")\n print_fn(f\"file size of checkpoint: {file_size / 2**20:.1f}MB\")\n\n\ndef log_to_file(\n *, log_data: dict, save_dir: str, experiment_name: str, timestamp: str, print_fn: Callable[..., None]\n) -> None:\n if save_dir.endswith(RESULT_PATH):\n file_name = f\"{experiment_name.replace(os.path.sep, '--')}.json\"\n else:\n # For cancelled and temporary runs, we want to include the timestamp, as these runs are not tracked in git, thus\n # we need unique names to avoid losing history.\n file_name = f\"{experiment_name.replace(os.path.sep, '--')}--{timestamp.replace(':', '-')}.json\"\n file_name = os.path.join(save_dir, file_name)\n with open(file_name, \"w\") as f:\n json.dump(log_data, f, indent=2)\n print_fn(f\"Saved log to: {file_name}\")\n\n\ndef log_results(\n *,\n experiment_name: str,","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.log_to_file","uri":"program://MetaMathQA/function/utils.log_to_file#L600-L612","kind":"function","name":"log_to_file","path":"utils.py","language":"python","start_line":600,"end_line":612,"context_start_line":580,"context_end_line":632,"code":" num_trainable_params: int\n num_total_params: int\n\n\ndef log_to_console(log_data: dict[str, Any], print_fn: Callable[..., None]) -> None:\n accelerator_memory_max = log_data[\"train_info\"][\"accelerator_memory_max\"]\n accelerator_memory_avg = log_data[\"train_info\"][\"accelerator_memory_reserved_avg\"]\n accelerator_memory_reserved_99th = log_data[\"train_info\"][\"accelerator_memory_reserved_99th\"]\n time_train = log_data[\"train_info\"][\"train_time\"]\n time_total = log_data[\"run_info\"][\"total_time\"]\n file_size = log_data[\"train_info\"][\"file_size\"]\n\n print_fn(f\"accelerator memory max: {accelerator_memory_max // 2**20}MB\")\n print_fn(f\"accelerator memory reserved avg: {accelerator_memory_avg // 2**20}MB\")\n print_fn(f\"accelerator memory reserved 99th percentile: {accelerator_memory_reserved_99th // 2**20}MB\")\n print_fn(f\"train time: {time_train}s\")\n print_fn(f\"total time: {time_total:.2f}s\")\n print_fn(f\"file size of checkpoint: {file_size / 2**20:.1f}MB\")\n\n\ndef log_to_file(\n *, log_data: dict, save_dir: str, experiment_name: str, timestamp: str, print_fn: Callable[..., None]\n) -> None:\n if save_dir.endswith(RESULT_PATH):\n file_name = f\"{experiment_name.replace(os.path.sep, '--')}.json\"\n else:\n # For cancelled and temporary runs, we want to include the timestamp, as these runs are not tracked in git, thus\n # we need unique names to avoid losing history.\n file_name = f\"{experiment_name.replace(os.path.sep, '--')}--{timestamp.replace(':', '-')}.json\"\n file_name = os.path.join(save_dir, file_name)\n with open(file_name, \"w\") as f:\n json.dump(log_data, f, indent=2)\n print_fn(f\"Saved log to: {file_name}\")\n\n\ndef log_results(\n *,\n experiment_name: str,\n train_result: TrainResult,\n accelerator_memory_init: int,\n time_total: float,\n file_size: int,\n model_info: Optional[huggingface_hub.ModelInfo],\n datasets_info: dict[str, Optional[huggingface_hub.DatasetInfo]],\n start_date: str,\n train_config: TrainConfig,\n peft_config: Optional[PeftConfig],\n print_fn: Callable[..., None],\n) -> None:\n # collect results\n device = infer_device()\n torch_accelerator_module = getattr(torch, device, torch.cuda)\n accelerator_memory_final = torch_accelerator_module.max_memory_reserved()","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.log_results","uri":"program://MetaMathQA/function/utils.log_results#L615-L710","kind":"function","name":"log_results","path":"utils.py","language":"python","start_line":615,"end_line":710,"context_start_line":595,"context_end_line":710,"code":" print_fn(f\"train time: {time_train}s\")\n print_fn(f\"total time: {time_total:.2f}s\")\n print_fn(f\"file size of checkpoint: {file_size / 2**20:.1f}MB\")\n\n\ndef log_to_file(\n *, log_data: dict, save_dir: str, experiment_name: str, timestamp: str, print_fn: Callable[..., None]\n) -> None:\n if save_dir.endswith(RESULT_PATH):\n file_name = f\"{experiment_name.replace(os.path.sep, '--')}.json\"\n else:\n # For cancelled and temporary runs, we want to include the timestamp, as these runs are not tracked in git, thus\n # we need unique names to avoid losing history.\n file_name = f\"{experiment_name.replace(os.path.sep, '--')}--{timestamp.replace(':', '-')}.json\"\n file_name = os.path.join(save_dir, file_name)\n with open(file_name, \"w\") as f:\n json.dump(log_data, f, indent=2)\n print_fn(f\"Saved log to: {file_name}\")\n\n\ndef log_results(\n *,\n experiment_name: str,\n train_result: TrainResult,\n accelerator_memory_init: int,\n time_total: float,\n file_size: int,\n model_info: Optional[huggingface_hub.ModelInfo],\n datasets_info: dict[str, Optional[huggingface_hub.DatasetInfo]],\n start_date: str,\n train_config: TrainConfig,\n peft_config: Optional[PeftConfig],\n print_fn: Callable[..., None],\n) -> None:\n # collect results\n device = infer_device()\n torch_accelerator_module = getattr(torch, device, torch.cuda)\n accelerator_memory_final = torch_accelerator_module.max_memory_reserved()\n accelerator_memory_avg = int(\n sum(train_result.accelerator_memory_reserved_log) / len(train_result.accelerator_memory_reserved_log)\n )\n accelerator_memory_reserved_99th = int(np.percentile(train_result.accelerator_memory_reserved_log, 99))\n\n meta_info = get_meta_info()\n if model_info is not None:\n model_sha = model_info.sha\n model_created_at = model_info.created_at.isoformat()\n else:\n model_sha = None\n model_created_at = None\n\n dataset_info_log = {}\n for key, dataset_info in datasets_info.items():\n if dataset_info is not None:\n dataset_sha = dataset_info.sha\n dataset_created_at = dataset_info.created_at.isoformat()\n else:\n dataset_sha = None\n dataset_created_at = None\n dataset_info_log[key] = {\"sha\": dataset_sha, \"created_at\": dataset_created_at}\n\n peft_branch = get_peft_branch()\n\n if train_result.status == TrainStatus.CANCELED:\n save_dir = RESULT_PATH_CANCELLED\n print_fn(\"Experiment run was categorized as canceled\")\n elif peft_branch != \"main\":\n save_dir = RESULT_PATH_TEST\n print_fn(f\"Experiment run was categorized as a test run on branch {peft_branch}\")\n elif train_result.status == TrainStatus.SUCCESS:\n save_dir = RESULT_PATH\n print_fn(\"Experiment run was categorized as successful run\")\n else:\n save_dir = tempfile.mkdtemp()\n print_fn(f\"Experiment could not be categorized, writing results to {save_dir}. Please open an issue on PEFT.\")\n\n if peft_config is None:\n peft_config_dict: Optional[dict[str, Any]] = None\n else:\n peft_config_dict = peft_config.to_dict()\n for key, value in peft_config_dict.items():\n if isinstance(value, set):\n peft_config_dict[key] = list(value)\n\n log_data = {\n \"run_info\": {\n \"created_at\": start_date,\n \"total_time\": time_total,\n \"experiment_name\": experiment_name,\n \"peft_branch\": peft_branch,\n \"train_config\": asdict(train_config),\n \"peft_config\": peft_config_dict,\n \"error_msg\": train_result.error_msg,\n },\n \"train_info\": {\n \"accelerator_memory_reserved_avg\": accelerator_memory_avg,\n \"accelerator_memory_max\": (accelerator_memory_final - accelerator_memory_init),\n \"accelerator_memory_reserved_99th\": accelerator_memory_reserved_99th,\n \"train_time\": train_result.train_time,\n \"file_size\": file_size,\n \"num_trainable_params\": train_result.num_trainable_params,\n \"num_total_params\": train_result.num_total_params,\n \"status\": train_result.status.value,\n \"metrics\": train_result.metrics,\n },\n \"meta_info\": {\n \"model_info\": {\"sha\": model_sha, \"created_at\": model_created_at},\n \"dataset_info\": dataset_info_log,\n **asdict(meta_info),\n },\n }\n\n log_to_console(log_data, print_fn=print) # use normal print to be able to redirect if so desired\n log_to_file(\n log_data=log_data, save_dir=save_dir, experiment_name=experiment_name, timestamp=start_date, print_fn=print_fn\n )","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.__post_init__","uri":"program://MetaMathQA/function/utils.__post_init__#L112-L136","kind":"function","name":"__post_init__","path":"utils.py","language":"python","start_line":112,"end_line":136,"context_start_line":92,"context_end_line":156,"code":"\n model_id: str\n dtype: Literal[\"float32\", \"float16\", \"bfloat16\", \"int8\", \"int4\"]\n max_seq_length: int\n batch_size: int\n batch_size_eval: int\n max_steps: int\n eval_steps: int\n compile: bool\n query_template: str\n seed: int\n grad_norm_clip: float # set to 0 to skip\n optimizer_type: str\n optimizer_kwargs: dict[str, Any]\n lr_scheduler: Optional[Literal[\"cosine\"]]\n use_amp: bool\n autocast_adapter_dtype: bool\n generation_kwargs: dict[str, Any]\n attn_implementation: Optional[str]\n\n def __post_init__(self) -> None:\n if not isinstance(self.model_id, str):\n raise ValueError(f\"Invalid model_id: {self.model_id}\")\n if self.dtype not in [\"float32\", \"float16\", \"bfloat16\", \"int8\", \"int4\"]:\n raise ValueError(f\"Invalid dtype: {self.dtype}\")\n if self.max_seq_length < 0:\n raise ValueError(f\"Invalid max_seq_length: {self.max_seq_length}\")\n if self.batch_size <= 0:\n raise ValueError(f\"Invalid batch_size: {self.batch_size}\")\n if self.batch_size_eval <= 0:\n raise ValueError(f\"Invalid eval batch_size: {self.batch_size_eval}\")\n if self.max_steps <= 0:\n raise ValueError(f\"Invalid max_steps: {self.max_steps}\")\n if self.eval_steps <= 0:\n raise ValueError(f\"Invalid eval_steps: {self.eval_steps}\")\n if self.eval_steps > self.max_steps:\n raise ValueError(f\"Invalid eval_steps: {self.eval_steps} > max_steps: {self.max_steps}\")\n if self.grad_norm_clip < 0:\n raise ValueError(f\"Invalid grad_norm_clip: {self.grad_norm_clip}\")\n if self.optimizer_type not in [\"lora+\", \"lora-fa\"] and not hasattr(torch.optim, self.optimizer_type):\n raise ValueError(f\"Invalid optimizer_type: {self.optimizer_type}\")\n if self.lr_scheduler not in [None, \"cosine\"]:\n raise ValueError(f\"Invalid lr_scheduler: {self.lr_scheduler}, must be None or 'cosine'\")\n if \"{query}\" not in self.query_template:\n raise ValueError(\"Invalid query_template, must contain '{query}'\")\n\n\ndef validate_experiment_path(path: str) -> str:\n # the experiment path should take the form of ./experiments//\n # e.g. ./experiments/lora/rank32\n # it should contain:\n # - adapter_config.json\n # - optional: training_params.json\n if not os.path.exists(FILE_NAME_DEFAULT_TRAIN_PARAMS):\n raise FileNotFoundError(\n f\"Missing default training params file '{FILE_NAME_DEFAULT_TRAIN_PARAMS}' in the ./experiments directory\"\n )\n if not os.path.exists(path):\n raise FileNotFoundError(f\"Path {path} does not exist\")\n\n # check path structure\n path_parts = path.rstrip(os.path.sep).split(os.path.sep)\n if (len(path_parts) != 3) or (path_parts[-3] != \"experiments\"):\n raise ValueError(\n f\"Path {path} does not have the correct structure, should be ./experiments//\"","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.__init__","uri":"program://MetaMathQA/function/utils.__init__#L311-L317","kind":"function","name":"__init__","path":"utils.py","language":"python","start_line":311,"end_line":317,"context_start_line":291,"context_end_line":337,"code":" raise ValueError(f\"Invalid lr_scheduler argument: {lr_scheduler_arg}\")\n\n return optimizer, lr_scheduler\n\n\nclass BucketIterator:\n \"\"\"\n Iterator that yields batches of data from a torch Dataset, grouped in buckets by sequence length\n\n The iterator will yield batches of size `batch_size`, where the samples in each batch are sorted by sequence length.\n This is done to minimize the amount of padding required for each batch. To avoid sorting the entire dataset and thus\n introducing a bias, the dataset is first split into buckets of size `batch_size * bucket_factor`.\n\n Args:\n ds: The torch Dataset to iterate over\n batch_size: The batch size\n bucket_factor: The factor by which to multiply the batch size to determine the bucket size\n delete_cols: The columns to delete from the dataset before yielding a batch\n \"\"\"\n\n def __init__(self, ds, *, batch_size: int, bucket_factor: int, delete_cols: list[str]) -> None:\n self.ds = ds\n self.batch_size = batch_size\n self.bucket_factor = bucket_factor\n self.delete_cols = set(delete_cols)\n\n assert self.bucket_factor > 0, \"bucket_factor must be greater than 0\"\n\n def _batch_iterator(self, bucket):\n tokens_per_sample_bucket = torch.tensor([len(i) for i in bucket[\"input_ids\"]])\n # sort long to short instead to encounter possible OOM errors as early as possible\n sorted = torch.argsort(tokens_per_sample_bucket, descending=True)\n cls = type(bucket) # conserve the type returned by the ds\n bucket = {k: [v[i] for i in sorted] for k, v in bucket.items() if k not in self.delete_cols}\n num_samples = len(bucket[\"input_ids\"])\n for j in range(0, num_samples, self.batch_size):\n batch = {k: v[j : j + self.batch_size] for k, v in bucket.items()}\n yield cls(batch)\n\n def __iter__(self):\n bucket_size = self.batch_size * self.bucket_factor\n for i in range(0, len(self.ds), bucket_size):\n bucket = self.ds[i : i + bucket_size]\n yield from self._batch_iterator(bucket)\n\n # if there is a remainder, we yield the last batch\n if len(self.ds) % bucket_size != 0:","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.get_last_lr","uri":"program://MetaMathQA/function/utils.get_last_lr#L265-L266","kind":"function","name":"get_last_lr","path":"utils.py","language":"python","start_line":265,"end_line":266,"context_start_line":245,"context_end_line":286,"code":" compile: bool,\n attn_implementation: Optional[str],\n peft_config: Optional[PeftConfig],\n autocast_adapter_dtype: bool,\n) -> nn.Module:\n base_model = get_base_model(\n model_id=model_id, dtype=dtype, compile=compile, attn_implementation=attn_implementation\n )\n if peft_config is None:\n model = base_model\n else:\n model = get_peft_model(base_model, peft_config, autocast_adapter_dtype=autocast_adapter_dtype)\n return model\n\n\nclass DummyScheduler:\n # if no lr scheduler is being used\n def __init__(self, lr):\n self.lr = lr\n\n def get_last_lr(self):\n return [self.lr]\n\n def step(self):\n pass\n\n\ndef get_optimizer_and_scheduler(\n model, *, optimizer_type: str, max_steps: int, lr_scheduler_arg: Optional[Literal[\"cosine\"]], **optimizer_kwargs\n) -> tuple[torch.optim.Optimizer, Any]:\n if optimizer_type == \"lora+\":\n optimizer = create_loraplus_optimizer(model, optimizer_cls=torch.optim.AdamW, **optimizer_kwargs)\n elif optimizer_type == \"lora-fa\":\n optimizer = create_lorafa_optimizer(model, **optimizer_kwargs)\n else:\n cls = getattr(torch.optim, optimizer_type)\n optimizer = cls(model.parameters(), **optimizer_kwargs)\n\n if lr_scheduler_arg == \"cosine\":\n warmup_steps = int(WARMUP_STEP_RATIO * max_steps)\n lr_scheduler = get_cosine_schedule_with_warmup(\n optimizer, num_warmup_steps=warmup_steps, num_training_steps=max_steps","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.step","uri":"program://MetaMathQA/function/utils.step#L268-L269","kind":"function","name":"step","path":"utils.py","language":"python","start_line":268,"end_line":269,"context_start_line":248,"context_end_line":289,"code":" autocast_adapter_dtype: bool,\n) -> nn.Module:\n base_model = get_base_model(\n model_id=model_id, dtype=dtype, compile=compile, attn_implementation=attn_implementation\n )\n if peft_config is None:\n model = base_model\n else:\n model = get_peft_model(base_model, peft_config, autocast_adapter_dtype=autocast_adapter_dtype)\n return model\n\n\nclass DummyScheduler:\n # if no lr scheduler is being used\n def __init__(self, lr):\n self.lr = lr\n\n def get_last_lr(self):\n return [self.lr]\n\n def step(self):\n pass\n\n\ndef get_optimizer_and_scheduler(\n model, *, optimizer_type: str, max_steps: int, lr_scheduler_arg: Optional[Literal[\"cosine\"]], **optimizer_kwargs\n) -> tuple[torch.optim.Optimizer, Any]:\n if optimizer_type == \"lora+\":\n optimizer = create_loraplus_optimizer(model, optimizer_cls=torch.optim.AdamW, **optimizer_kwargs)\n elif optimizer_type == \"lora-fa\":\n optimizer = create_lorafa_optimizer(model, **optimizer_kwargs)\n else:\n cls = getattr(torch.optim, optimizer_type)\n optimizer = cls(model.parameters(), **optimizer_kwargs)\n\n if lr_scheduler_arg == \"cosine\":\n warmup_steps = int(WARMUP_STEP_RATIO * max_steps)\n lr_scheduler = get_cosine_schedule_with_warmup(\n optimizer, num_warmup_steps=warmup_steps, num_training_steps=max_steps\n )\n elif lr_scheduler_arg is None:\n lr_scheduler = DummyScheduler(optimizer_kwargs[\"lr\"])","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils._batch_iterator","uri":"program://MetaMathQA/function/utils._batch_iterator#L319-L328","kind":"function","name":"_batch_iterator","path":"utils.py","language":"python","start_line":319,"end_line":328,"context_start_line":299,"context_end_line":348,"code":"\n The iterator will yield batches of size `batch_size`, where the samples in each batch are sorted by sequence length.\n This is done to minimize the amount of padding required for each batch. To avoid sorting the entire dataset and thus\n introducing a bias, the dataset is first split into buckets of size `batch_size * bucket_factor`.\n\n Args:\n ds: The torch Dataset to iterate over\n batch_size: The batch size\n bucket_factor: The factor by which to multiply the batch size to determine the bucket size\n delete_cols: The columns to delete from the dataset before yielding a batch\n \"\"\"\n\n def __init__(self, ds, *, batch_size: int, bucket_factor: int, delete_cols: list[str]) -> None:\n self.ds = ds\n self.batch_size = batch_size\n self.bucket_factor = bucket_factor\n self.delete_cols = set(delete_cols)\n\n assert self.bucket_factor > 0, \"bucket_factor must be greater than 0\"\n\n def _batch_iterator(self, bucket):\n tokens_per_sample_bucket = torch.tensor([len(i) for i in bucket[\"input_ids\"]])\n # sort long to short instead to encounter possible OOM errors as early as possible\n sorted = torch.argsort(tokens_per_sample_bucket, descending=True)\n cls = type(bucket) # conserve the type returned by the ds\n bucket = {k: [v[i] for i in sorted] for k, v in bucket.items() if k not in self.delete_cols}\n num_samples = len(bucket[\"input_ids\"])\n for j in range(0, num_samples, self.batch_size):\n batch = {k: v[j : j + self.batch_size] for k, v in bucket.items()}\n yield cls(batch)\n\n def __iter__(self):\n bucket_size = self.batch_size * self.bucket_factor\n for i in range(0, len(self.ds), bucket_size):\n bucket = self.ds[i : i + bucket_size]\n yield from self._batch_iterator(bucket)\n\n # if there is a remainder, we yield the last batch\n if len(self.ds) % bucket_size != 0:\n bucket = self.ds[-(len(self.ds) % bucket_size) :]\n yield from self._batch_iterator(bucket)\n\n\ndef get_file_size(\n model: nn.Module, *, peft_config: Optional[PeftConfig], clean: bool, print_fn: Callable[..., None]\n) -> int:\n file_size = 99999999 # set a default dummy value\n if peft_config is not None:\n try:\n with tempfile.TemporaryDirectory(ignore_cleanup_errors=True, delete=clean) as tmp_dir:","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:utils.__iter__","uri":"program://MetaMathQA/function/utils.__iter__#L330-L339","kind":"function","name":"__iter__","path":"utils.py","language":"python","start_line":330,"end_line":339,"context_start_line":310,"context_end_line":359,"code":"\n def __init__(self, ds, *, batch_size: int, bucket_factor: int, delete_cols: list[str]) -> None:\n self.ds = ds\n self.batch_size = batch_size\n self.bucket_factor = bucket_factor\n self.delete_cols = set(delete_cols)\n\n assert self.bucket_factor > 0, \"bucket_factor must be greater than 0\"\n\n def _batch_iterator(self, bucket):\n tokens_per_sample_bucket = torch.tensor([len(i) for i in bucket[\"input_ids\"]])\n # sort long to short instead to encounter possible OOM errors as early as possible\n sorted = torch.argsort(tokens_per_sample_bucket, descending=True)\n cls = type(bucket) # conserve the type returned by the ds\n bucket = {k: [v[i] for i in sorted] for k, v in bucket.items() if k not in self.delete_cols}\n num_samples = len(bucket[\"input_ids\"])\n for j in range(0, num_samples, self.batch_size):\n batch = {k: v[j : j + self.batch_size] for k, v in bucket.items()}\n yield cls(batch)\n\n def __iter__(self):\n bucket_size = self.batch_size * self.bucket_factor\n for i in range(0, len(self.ds), bucket_size):\n bucket = self.ds[i : i + bucket_size]\n yield from self._batch_iterator(bucket)\n\n # if there is a remainder, we yield the last batch\n if len(self.ds) % bucket_size != 0:\n bucket = self.ds[-(len(self.ds) % bucket_size) :]\n yield from self._batch_iterator(bucket)\n\n\ndef get_file_size(\n model: nn.Module, *, peft_config: Optional[PeftConfig], clean: bool, print_fn: Callable[..., None]\n) -> int:\n file_size = 99999999 # set a default dummy value\n if peft_config is not None:\n try:\n with tempfile.TemporaryDirectory(ignore_cleanup_errors=True, delete=clean) as tmp_dir:\n model.save_pretrained(tmp_dir)\n stat = os.stat(os.path.join(tmp_dir, SAFETENSORS_WEIGHTS_NAME))\n file_size = stat.st_size\n if not clean:\n print_fn(f\"Saved PEFT checkpoint to {tmp_dir}\")\n except Exception as exc:\n print(f\"Failed to save PEFT checkpoint due to the following error: {exc}\")\n else:\n print_fn(\"Not saving the fully fine-tuned model because it's too big, estimating the size instead\")\n try:\n num_params = model.num_parameters()","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:data","uri":"program://MetaMathQA/module/data#L1-L109","kind":"module","name":"data","path":"data.py","language":"python","start_line":1,"end_line":109,"context_start_line":1,"context_end_line":109,"code":"# Copyright 2025-present the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nAll utilities related to data handling.\n\"\"\"\n\nfrom functools import partial\nfrom typing import Callable\n\nimport datasets\nimport numpy as np\nfrom datasets import Dataset, load_dataset\n\n\n# with a token limit of 768 for query + response, we have to exclude all texts with length > 1304; this leaves 93.8% of\n# the dataset\nCHAR_LIMIT = 1300\n# train/valid/test split -- note that evaluation takes quite long, so don't choose too large sizes for the valid set,\n# since it's run multiple times during training; test is only run once at the end and thus can be larger\nVALID_SIZE = 50\n\n\ndef get_filtered_dataset(*, ds: datasets.Dataset, print_fn: Callable[..., None]) -> Dataset:\n \"\"\"Return the filtered dataset, with long queries removed.\n\n We determined that 99% of queries have 529 or fewer characters. Characters roughly correspond to tokens, so this is\n a good proxy. We cannot use tokens directly, as that depends on the tokenizer, which can be different for each\n model, but we want the same filter for each model.\n\n \"\"\"\n char_lengths = [len(f\"{q} {r}\") for q, r in zip(ds[\"query\"], ds[\"response\"])]\n idx_filtered = [i for i, length in enumerate(char_lengths) if length <= CHAR_LIMIT]\n print_fn(f\"Filtered dataset: {100 * len(idx_filtered) / len(ds):.1f}% of the original dataset\")\n return ds.select(idx_filtered)\n\n\ndef get_train_valid_test_datasets(\n *, tokenizer, query_template: str, print_fn: Callable[..., None]\n) -> tuple[Dataset, Dataset, Dataset]:\n \"\"\"\n Return the indices of the train, valid, and test splits of the dataset.\n\n We cannot use ds.train_test_split(..., stratify_by_column=\"type\") as it gives:\n\n > ValueError: Stratifying by column is only supported for ClassLabel column, and column type is Value.\n\n even after calling ds_filtered.class_encode_column(\"type\"). Thus, using sklearn's StratifiedKFold instead.\n \"\"\"\n metamath = load_dataset(\"meta-math/MetaMathQA\")[\"train\"]\n metamath = get_filtered_dataset(ds=metamath, print_fn=print_fn)\n\n # gsmk8k does not need to be filtered as query and response are short enough\n gsm8k = load_dataset(\"openai/gsm8k\", \"main\")\n gsm8k = gsm8k.rename_columns({\"question\": \"query\", \"answer\": \"response\"})\n gsm8k_train = gsm8k[\"train\"]\n gsm8k_test = gsm8k[\"test\"]\n\n np.random.seed(0)\n indices = np.arange(len(gsm8k_train))\n np.random.shuffle(indices)\n idx_valid = indices[:VALID_SIZE]\n\n ds_train = metamath\n ds_valid = gsm8k_train.select(idx_valid)\n ds_test = gsm8k_test\n\n print_fn(f\"Train size: {len(ds_train)}\")\n print_fn(f\"Valid size: {len(ds_valid)}\")\n print_fn(f\"Test size: {len(ds_test)}\")\n\n tokenize_with_answer_ = partial(tokenize_with_answer, tokenizer=tokenizer, template=query_template)\n tokenize_wo_answer_ = partial(tokenize_wo_answer, tokenizer=tokenizer, template=query_template)\n ds_train = ds_train.map(tokenize_with_answer_, batched=True).remove_columns([\"type\", \"query\", \"original_question\"])\n ds_valid = ds_valid.map(tokenize_wo_answer_, batched=True).remove_columns([\"query\"])\n ds_test = ds_test.map(tokenize_wo_answer_, batched=True).remove_columns([\"query\"])\n\n return ds_train, ds_valid, ds_test\n\n\ndef tokenize_with_answer(samples, tokenizer, template):\n queries = [template.format(query=sample) + answer for sample, answer in zip(samples[\"query\"], samples[\"response\"])]\n tokenized = tokenizer(queries)\n tokenized[\"input_ids\"] = [input_ids[: tokenizer.model_max_length] for input_ids in tokenized[\"input_ids\"]]\n tokenized[\"attention_mask\"] = [\n input_ids[: tokenizer.model_max_length] for input_ids in tokenized[\"attention_mask\"]\n ]\n return tokenized\n\n\ndef tokenize_wo_answer(samples, tokenizer, template):\n queries = [template.format(query=sample) for sample in samples[\"query\"]]\n tokenized = tokenizer(queries)\n tokenized[\"input_ids\"] = [input_ids[: tokenizer.model_max_length] for input_ids in tokenized[\"input_ids\"]]\n tokenized[\"attention_mask\"] = [\n input_ids[: tokenizer.model_max_length] for input_ids in tokenized[\"attention_mask\"]\n ]\n return tokenized","source_hash":"f5c0a1ce9c0773bb7118a761ecd50dd3987f57edc07301a0d74037fb659f3d79","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:data.get_filtered_dataset","uri":"program://MetaMathQA/function/data.get_filtered_dataset#L35-L46","kind":"function","name":"get_filtered_dataset","path":"data.py","language":"python","start_line":35,"end_line":46,"context_start_line":15,"context_end_line":66,"code":"\"\"\"\nAll utilities related to data handling.\n\"\"\"\n\nfrom functools import partial\nfrom typing import Callable\n\nimport datasets\nimport numpy as np\nfrom datasets import Dataset, load_dataset\n\n\n# with a token limit of 768 for query + response, we have to exclude all texts with length > 1304; this leaves 93.8% of\n# the dataset\nCHAR_LIMIT = 1300\n# train/valid/test split -- note that evaluation takes quite long, so don't choose too large sizes for the valid set,\n# since it's run multiple times during training; test is only run once at the end and thus can be larger\nVALID_SIZE = 50\n\n\ndef get_filtered_dataset(*, ds: datasets.Dataset, print_fn: Callable[..., None]) -> Dataset:\n \"\"\"Return the filtered dataset, with long queries removed.\n\n We determined that 99% of queries have 529 or fewer characters. Characters roughly correspond to tokens, so this is\n a good proxy. We cannot use tokens directly, as that depends on the tokenizer, which can be different for each\n model, but we want the same filter for each model.\n\n \"\"\"\n char_lengths = [len(f\"{q} {r}\") for q, r in zip(ds[\"query\"], ds[\"response\"])]\n idx_filtered = [i for i, length in enumerate(char_lengths) if length <= CHAR_LIMIT]\n print_fn(f\"Filtered dataset: {100 * len(idx_filtered) / len(ds):.1f}% of the original dataset\")\n return ds.select(idx_filtered)\n\n\ndef get_train_valid_test_datasets(\n *, tokenizer, query_template: str, print_fn: Callable[..., None]\n) -> tuple[Dataset, Dataset, Dataset]:\n \"\"\"\n Return the indices of the train, valid, and test splits of the dataset.\n\n We cannot use ds.train_test_split(..., stratify_by_column=\"type\") as it gives:\n\n > ValueError: Stratifying by column is only supported for ClassLabel column, and column type is Value.\n\n even after calling ds_filtered.class_encode_column(\"type\"). Thus, using sklearn's StratifiedKFold instead.\n \"\"\"\n metamath = load_dataset(\"meta-math/MetaMathQA\")[\"train\"]\n metamath = get_filtered_dataset(ds=metamath, print_fn=print_fn)\n\n # gsmk8k does not need to be filtered as query and response are short enough\n gsm8k = load_dataset(\"openai/gsm8k\", \"main\")\n gsm8k = gsm8k.rename_columns({\"question\": \"query\", \"answer\": \"response\"})","source_hash":"f5c0a1ce9c0773bb7118a761ecd50dd3987f57edc07301a0d74037fb659f3d79","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:data.get_train_valid_test_datasets","uri":"program://MetaMathQA/function/data.get_train_valid_test_datasets#L49-L89","kind":"function","name":"get_train_valid_test_datasets","path":"data.py","language":"python","start_line":49,"end_line":89,"context_start_line":29,"context_end_line":109,"code":"CHAR_LIMIT = 1300\n# train/valid/test split -- note that evaluation takes quite long, so don't choose too large sizes for the valid set,\n# since it's run multiple times during training; test is only run once at the end and thus can be larger\nVALID_SIZE = 50\n\n\ndef get_filtered_dataset(*, ds: datasets.Dataset, print_fn: Callable[..., None]) -> Dataset:\n \"\"\"Return the filtered dataset, with long queries removed.\n\n We determined that 99% of queries have 529 or fewer characters. Characters roughly correspond to tokens, so this is\n a good proxy. We cannot use tokens directly, as that depends on the tokenizer, which can be different for each\n model, but we want the same filter for each model.\n\n \"\"\"\n char_lengths = [len(f\"{q} {r}\") for q, r in zip(ds[\"query\"], ds[\"response\"])]\n idx_filtered = [i for i, length in enumerate(char_lengths) if length <= CHAR_LIMIT]\n print_fn(f\"Filtered dataset: {100 * len(idx_filtered) / len(ds):.1f}% of the original dataset\")\n return ds.select(idx_filtered)\n\n\ndef get_train_valid_test_datasets(\n *, tokenizer, query_template: str, print_fn: Callable[..., None]\n) -> tuple[Dataset, Dataset, Dataset]:\n \"\"\"\n Return the indices of the train, valid, and test splits of the dataset.\n\n We cannot use ds.train_test_split(..., stratify_by_column=\"type\") as it gives:\n\n > ValueError: Stratifying by column is only supported for ClassLabel column, and column type is Value.\n\n even after calling ds_filtered.class_encode_column(\"type\"). Thus, using sklearn's StratifiedKFold instead.\n \"\"\"\n metamath = load_dataset(\"meta-math/MetaMathQA\")[\"train\"]\n metamath = get_filtered_dataset(ds=metamath, print_fn=print_fn)\n\n # gsmk8k does not need to be filtered as query and response are short enough\n gsm8k = load_dataset(\"openai/gsm8k\", \"main\")\n gsm8k = gsm8k.rename_columns({\"question\": \"query\", \"answer\": \"response\"})\n gsm8k_train = gsm8k[\"train\"]\n gsm8k_test = gsm8k[\"test\"]\n\n np.random.seed(0)\n indices = np.arange(len(gsm8k_train))\n np.random.shuffle(indices)\n idx_valid = indices[:VALID_SIZE]\n\n ds_train = metamath\n ds_valid = gsm8k_train.select(idx_valid)\n ds_test = gsm8k_test\n\n print_fn(f\"Train size: {len(ds_train)}\")\n print_fn(f\"Valid size: {len(ds_valid)}\")\n print_fn(f\"Test size: {len(ds_test)}\")\n\n tokenize_with_answer_ = partial(tokenize_with_answer, tokenizer=tokenizer, template=query_template)\n tokenize_wo_answer_ = partial(tokenize_wo_answer, tokenizer=tokenizer, template=query_template)\n ds_train = ds_train.map(tokenize_with_answer_, batched=True).remove_columns([\"type\", \"query\", \"original_question\"])\n ds_valid = ds_valid.map(tokenize_wo_answer_, batched=True).remove_columns([\"query\"])\n ds_test = ds_test.map(tokenize_wo_answer_, batched=True).remove_columns([\"query\"])\n\n return ds_train, ds_valid, ds_test\n\n\ndef tokenize_with_answer(samples, tokenizer, template):\n queries = [template.format(query=sample) + answer for sample, answer in zip(samples[\"query\"], samples[\"response\"])]\n tokenized = tokenizer(queries)\n tokenized[\"input_ids\"] = [input_ids[: tokenizer.model_max_length] for input_ids in tokenized[\"input_ids\"]]\n tokenized[\"attention_mask\"] = [\n input_ids[: tokenizer.model_max_length] for input_ids in tokenized[\"attention_mask\"]\n ]\n return tokenized\n\n\ndef tokenize_wo_answer(samples, tokenizer, template):\n queries = [template.format(query=sample) for sample in samples[\"query\"]]\n tokenized = tokenizer(queries)\n tokenized[\"input_ids\"] = [input_ids[: tokenizer.model_max_length] for input_ids in tokenized[\"input_ids\"]]\n tokenized[\"attention_mask\"] = [\n input_ids[: tokenizer.model_max_length] for input_ids in tokenized[\"attention_mask\"]\n ]\n return tokenized","source_hash":"f5c0a1ce9c0773bb7118a761ecd50dd3987f57edc07301a0d74037fb659f3d79","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:data.tokenize_with_answer","uri":"program://MetaMathQA/function/data.tokenize_with_answer#L92-L99","kind":"function","name":"tokenize_with_answer","path":"data.py","language":"python","start_line":92,"end_line":99,"context_start_line":72,"context_end_line":109,"code":" np.random.shuffle(indices)\n idx_valid = indices[:VALID_SIZE]\n\n ds_train = metamath\n ds_valid = gsm8k_train.select(idx_valid)\n ds_test = gsm8k_test\n\n print_fn(f\"Train size: {len(ds_train)}\")\n print_fn(f\"Valid size: {len(ds_valid)}\")\n print_fn(f\"Test size: {len(ds_test)}\")\n\n tokenize_with_answer_ = partial(tokenize_with_answer, tokenizer=tokenizer, template=query_template)\n tokenize_wo_answer_ = partial(tokenize_wo_answer, tokenizer=tokenizer, template=query_template)\n ds_train = ds_train.map(tokenize_with_answer_, batched=True).remove_columns([\"type\", \"query\", \"original_question\"])\n ds_valid = ds_valid.map(tokenize_wo_answer_, batched=True).remove_columns([\"query\"])\n ds_test = ds_test.map(tokenize_wo_answer_, batched=True).remove_columns([\"query\"])\n\n return ds_train, ds_valid, ds_test\n\n\ndef tokenize_with_answer(samples, tokenizer, template):\n queries = [template.format(query=sample) + answer for sample, answer in zip(samples[\"query\"], samples[\"response\"])]\n tokenized = tokenizer(queries)\n tokenized[\"input_ids\"] = [input_ids[: tokenizer.model_max_length] for input_ids in tokenized[\"input_ids\"]]\n tokenized[\"attention_mask\"] = [\n input_ids[: tokenizer.model_max_length] for input_ids in tokenized[\"attention_mask\"]\n ]\n return tokenized\n\n\ndef tokenize_wo_answer(samples, tokenizer, template):\n queries = [template.format(query=sample) for sample in samples[\"query\"]]\n tokenized = tokenizer(queries)\n tokenized[\"input_ids\"] = [input_ids[: tokenizer.model_max_length] for input_ids in tokenized[\"input_ids\"]]\n tokenized[\"attention_mask\"] = [\n input_ids[: tokenizer.model_max_length] for input_ids in tokenized[\"attention_mask\"]\n ]\n return tokenized","source_hash":"f5c0a1ce9c0773bb7118a761ecd50dd3987f57edc07301a0d74037fb659f3d79","truncated":false} {"repo_id":"MetaMathQA","entity_id":"py:data.tokenize_wo_answer","uri":"program://MetaMathQA/function/data.tokenize_wo_answer#L102-L109","kind":"function","name":"tokenize_wo_answer","path":"data.py","language":"python","start_line":102,"end_line":109,"context_start_line":82,"context_end_line":109,"code":"\n tokenize_with_answer_ = partial(tokenize_with_answer, tokenizer=tokenizer, template=query_template)\n tokenize_wo_answer_ = partial(tokenize_wo_answer, tokenizer=tokenizer, template=query_template)\n ds_train = ds_train.map(tokenize_with_answer_, batched=True).remove_columns([\"type\", \"query\", \"original_question\"])\n ds_valid = ds_valid.map(tokenize_wo_answer_, batched=True).remove_columns([\"query\"])\n ds_test = ds_test.map(tokenize_wo_answer_, batched=True).remove_columns([\"query\"])\n\n return ds_train, ds_valid, ds_test\n\n\ndef tokenize_with_answer(samples, tokenizer, template):\n queries = [template.format(query=sample) + answer for sample, answer in zip(samples[\"query\"], samples[\"response\"])]\n tokenized = tokenizer(queries)\n tokenized[\"input_ids\"] = [input_ids[: tokenizer.model_max_length] for input_ids in tokenized[\"input_ids\"]]\n tokenized[\"attention_mask\"] = [\n input_ids[: tokenizer.model_max_length] for input_ids in tokenized[\"attention_mask\"]\n ]\n return tokenized\n\n\ndef tokenize_wo_answer(samples, tokenizer, template):\n queries = [template.format(query=sample) for sample in samples[\"query\"]]\n tokenized = tokenizer(queries)\n tokenized[\"input_ids\"] = [input_ids[: tokenizer.model_max_length] for input_ids in tokenized[\"input_ids\"]]\n tokenized[\"attention_mask\"] = [\n input_ids[: tokenizer.model_max_length] for input_ids in tokenized[\"attention_mask\"]\n ]\n return tokenized","source_hash":"f5c0a1ce9c0773bb7118a761ecd50dd3987f57edc07301a0d74037fb659f3d79","truncated":false} {"repo_id":"MetaMathQA","entity_id":"file:run.py","uri":"program://MetaMathQA/file/run.py","kind":"file","name":"run.py","path":"run.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright 2025-present the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nMain entry point to run the experiments. Contains general setup and the proper training code.\n\"\"\"\n\nimport argparse\nimport datetime as dt\nimport gc","source_hash":"67e991c2fdb0b9aaf287b75353af3155b9a4804ec069dd9627957e82a1ab7e96","truncated":false} {"repo_id":"MetaMathQA","entity_id":"file:utils.py","uri":"program://MetaMathQA/file/utils.py","kind":"file","name":"utils.py","path":"utils.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright 2025-present the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nAll utilities not related to data handling.\n\"\"\"\n\nimport enum\nimport json\nimport os","source_hash":"cf82d461c04ea10492b64e8efa2c5969022d78e2303f636464d2c82dbe180d49","truncated":false} {"repo_id":"MetaMathQA","entity_id":"file:data.py","uri":"program://MetaMathQA/file/data.py","kind":"file","name":"data.py","path":"data.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright 2025-present the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nAll utilities related to data handling.\n\"\"\"\n\nfrom functools import partial\nfrom typing import Callable\n","source_hash":"f5c0a1ce9c0773bb7118a761ecd50dd3987f57edc07301a0d74037fb659f3d79","truncated":false}