code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def A_ ( _UpperCAmelCase ): return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )} def A_ ( ): SCREAMING_SNAKE_CASE_: Union[str, Any] = ArgumentParser( "HuggingFace Datasets CLI tool" , usage="datasets-cli <command> [<args>]" , allow_abbrev=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Union[str, Any] = parser.add_subparsers(help="datasets-cli command helpers" ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(_UpperCAmelCase ) EnvironmentCommand.register_subcommand(_UpperCAmelCase ) TestCommand.register_subcommand(_UpperCAmelCase ) RunBeamCommand.register_subcommand(_UpperCAmelCase ) DummyDataCommand.register_subcommand(_UpperCAmelCase ) # Parse args SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = parser.parse_known_args() if not hasattr(_UpperCAmelCase , "func" ): parser.print_help() exit(1 ) SCREAMING_SNAKE_CASE_: Tuple = parse_unknown_args(_UpperCAmelCase ) # Run SCREAMING_SNAKE_CASE_: List[str] = args.func(_UpperCAmelCase , **_UpperCAmelCase ) service.run() if __name__ == "__main__": main()
671
import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient lowerCAmelCase : List[Any] = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""]) def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = test_results.split(" " ) SCREAMING_SNAKE_CASE_: Tuple = 0 SCREAMING_SNAKE_CASE_: str = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. SCREAMING_SNAKE_CASE_: Optional[Any] = expressions[-2] if "=" in expressions[-1] else expressions[-1] for i, expression in enumerate(_UpperCAmelCase ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = {} SCREAMING_SNAKE_CASE_: Any = None SCREAMING_SNAKE_CASE_: Union[str, Any] = False for line in failures_short_lines.split("\n" ): if re.search(R"_ \[doctest\]" , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[Any] = True SCREAMING_SNAKE_CASE_: Dict = line.split(" " )[2] elif in_error and not line.split(" " )[0].isdigit(): SCREAMING_SNAKE_CASE_: Union[str, Any] = line SCREAMING_SNAKE_CASE_: List[str] = False return failures class __lowercase : """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict): SCREAMING_SNAKE_CASE_: Dict = title SCREAMING_SNAKE_CASE_: int = doc_test_results["time_spent"].split(",")[0] SCREAMING_SNAKE_CASE_: int = doc_test_results["success"] SCREAMING_SNAKE_CASE_: Optional[Any] = doc_test_results["failures"] SCREAMING_SNAKE_CASE_: Any = self.n_success + self.n_failures # Failures and success of the modeling tests SCREAMING_SNAKE_CASE_: Optional[int] = doc_test_results @property def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: int = [self._time_spent] SCREAMING_SNAKE_CASE_: 
List[Any] = 0 for time in time_spent: SCREAMING_SNAKE_CASE_: Union[str, Any] = time.split(":") # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(lowerCAmelCase__) == 1: SCREAMING_SNAKE_CASE_: Dict = [0, 0, time_parts[0]] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = int(time_parts[0]), int(time_parts[1]), float(time_parts[2]) total_secs += hours * 3600 + minutes * 60 + seconds SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return F"{int(lowerCAmelCase__)}h{int(lowerCAmelCase__)}m{int(lowerCAmelCase__)}s" @property def _SCREAMING_SNAKE_CASE ( self : List[Any]): return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return { "type": "section", "text": { "type": "plain_text", "text": F"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.", "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return { "type": "section", "text": { "type": "plain_text", "text": ( F"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in" F" {self.time}." 
), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: Optional[Any] = 40 SCREAMING_SNAKE_CASE_: List[str] = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(lowerCAmelCase__ , lowerCAmelCase__)} SCREAMING_SNAKE_CASE_: Tuple = "" for category, failures in category_failures.items(): if len(lowerCAmelCase__) == 0: continue if report != "": report += "\n\n" report += F"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n" report += "`" report += "`\n`".join(lowerCAmelCase__) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": F"The following examples had failures:\n\n\n{report}\n", }, } @property def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_: Optional[Any] = [self.header] if self.n_failures > 0: blocks.append(self.failures) if self.n_failures > 0: blocks.extend([self.category_failures]) if self.n_failures == 0: blocks.append(self.no_failures) return json.dumps(lowerCAmelCase__) @staticmethod def _SCREAMING_SNAKE_CASE ( ): SCREAMING_SNAKE_CASE_: List[str] = [ { "type": "section", "text": { "type": "plain_text", "text": "There was an issue running the tests.", }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } ] print("Sending the following payload") print(json.dumps({"blocks": json.loads(lowerCAmelCase__)})) client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." 
, blocks=lowerCAmelCase__ , ) def _SCREAMING_SNAKE_CASE ( self : Tuple): print("Sending the following payload") print(json.dumps({"blocks": json.loads(self.payload)})) SCREAMING_SNAKE_CASE_: Optional[Any] = F"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed." SCREAMING_SNAKE_CASE_: List[Any] = client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=lowerCAmelCase__ , ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any]): SCREAMING_SNAKE_CASE_: Dict = "" for key, value in failures.items(): SCREAMING_SNAKE_CASE_: str = value[:200] + " [Truncated]" if len(lowerCAmelCase__) > 250 else value failures_text += F"*{key}*\n_{value}_\n\n" SCREAMING_SNAKE_CASE_: Any = job_name SCREAMING_SNAKE_CASE_: List[Any] = {"type": "section", "text": {"type": "mrkdwn", "text": text}} if job_link is not None: SCREAMING_SNAKE_CASE_: Tuple = { "type": "button", "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True}, "url": job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def _SCREAMING_SNAKE_CASE ( self : Any): if self.thread_ts is None: raise ValueError("Can only post reply if a post has been made.") SCREAMING_SNAKE_CASE_: Tuple = self.doc_test_results.pop("job_link") self.doc_test_results.pop("failures") self.doc_test_results.pop("success") self.doc_test_results.pop("time_spent") SCREAMING_SNAKE_CASE_: Any = sorted(self.doc_test_results.items() , key=lambda lowerCAmelCase__: t[0]) for job, job_result in sorted_dict: if len(job_result["failures"]): SCREAMING_SNAKE_CASE_: Union[str, Any] = F"*Num failures* :{len(job_result['failed'])} \n" SCREAMING_SNAKE_CASE_: Optional[Any] = job_result["failures"] 
SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_reply_blocks(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , text=lowerCAmelCase__) print("Sending the following reply") print(json.dumps({"blocks": blocks})) client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F"Results for {job}" , blocks=lowerCAmelCase__ , thread_ts=self.thread_ts["ts"] , ) time.sleep(1) def A_ ( ): SCREAMING_SNAKE_CASE_: Tuple = os.environ["GITHUB_RUN_ID"] SCREAMING_SNAKE_CASE_: Any = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100" SCREAMING_SNAKE_CASE_: List[Any] = requests.get(_UpperCAmelCase ).json() SCREAMING_SNAKE_CASE_: Optional[Any] = {} try: jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} ) SCREAMING_SNAKE_CASE_: Any = math.ceil((result["total_count"] - 1_00) / 1_00 ) for i in range(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = requests.get(url + f"&page={i + 2}" ).json() jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} ) return jobs except Exception as e: print("Unknown error, could not fetch links." , _UpperCAmelCase ) return {} def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[Any] = {} if os.path.exists(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[str] = os.listdir(_UpperCAmelCase ) for file in files: try: with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE_: Dict = f.read() except UnicodeDecodeError as e: raise ValueError(f"Could not open {os.path.join(_UpperCAmelCase , _UpperCAmelCase )}." 
) from e return _artifact def A_ ( ): class __lowercase : """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : str): SCREAMING_SNAKE_CASE_: Dict = name SCREAMING_SNAKE_CASE_: List[str] = [] def __str__( self : Optional[Any]): return self.name def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : str): self.paths.append({"name": self.name, "path": path}) SCREAMING_SNAKE_CASE_: Dict[str, Artifact] = {} SCREAMING_SNAKE_CASE_: List[Any] = filter(os.path.isdir , os.listdir() ) for directory in directories: SCREAMING_SNAKE_CASE_: Dict = directory if artifact_name not in _available_artifacts: SCREAMING_SNAKE_CASE_: Tuple = Artifact(_UpperCAmelCase ) _available_artifacts[artifact_name].add_path(_UpperCAmelCase ) return _available_artifacts if __name__ == "__main__": lowerCAmelCase : Tuple = get_job_links() lowerCAmelCase : Optional[Any] = retrieve_available_artifacts() lowerCAmelCase : Any = collections.OrderedDict( [ ("""*.py""", """API Examples"""), ("""*.md""", """MD Examples"""), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' lowerCAmelCase : int = { v: { """failed""": [], """failures""": {}, } for v in docs.values() } # Link to the GitHub Action job lowerCAmelCase : Optional[int] = github_actions_job_links.get("""run_doctests""") lowerCAmelCase : List[Any] = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0] lowerCAmelCase : Any = retrieve_artifact(artifact_path["""name"""]) if "stats" in artifact: lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = handle_test_results(artifact["""stats"""]) lowerCAmelCase : List[str] = failed lowerCAmelCase : Any = success lowerCAmelCase : Dict = time_spent[1:-1] + """, """ lowerCAmelCase : str = extract_first_line_failure(artifact["""failures_short"""]) for line in artifact["summary_short"].split("""\n"""): if re.search("""FAILED""", line): 
lowerCAmelCase : Tuple = line.replace("""FAILED """, """""") lowerCAmelCase : str = line.split()[0].replace("""\n""", """""") if "::" in line: lowerCAmelCase , lowerCAmelCase : Optional[int] = line.split("""::""") else: lowerCAmelCase , lowerCAmelCase : str = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): lowerCAmelCase : str = docs[file_regex] doc_test_results[category]["failed"].append(test) lowerCAmelCase : str = all_failures[test] if test in all_failures else """N/A""" lowerCAmelCase : Any = failure break lowerCAmelCase : Union[str, Any] = Message("""🤗 Results of the doc tests.""", doc_test_results) message.post() message.post_reply()
671
1
def A_ ( _UpperCAmelCase ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise TypeError("only integers accepted as input" ) else: SCREAMING_SNAKE_CASE_: List[Any] = str(abs(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE_: Tuple = [list(_UpperCAmelCase ) for char in range(len(_UpperCAmelCase ) )] for index in range(len(_UpperCAmelCase ) ): num_transpositions[index].pop(_UpperCAmelCase ) return max( int("".join(list(_UpperCAmelCase ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__("""doctest""").testmod()
671
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase : str = 16 lowerCAmelCase : List[Any] = 32 def A_ ( _UpperCAmelCase , _UpperCAmelCase = 16 ): SCREAMING_SNAKE_CASE_: List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" ) SCREAMING_SNAKE_CASE_: Tuple = load_dataset("glue" , "mrpc" ) def tokenize_function(_UpperCAmelCase ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE_: List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE_: str = datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE_: 
Optional[Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(_UpperCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE_: List[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE_: Tuple = 16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE_: int = 8 else: SCREAMING_SNAKE_CASE_: Any = None return tokenizer.pad( _UpperCAmelCase , padding="longest" , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors="pt" , ) # Instantiate dataloaders. SCREAMING_SNAKE_CASE_: Optional[Any] = DataLoader( tokenized_datasets["train"] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Tuple = DataLoader( tokenized_datasets["validation"] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCAmelCase : Optional[int] = mocked_dataloaders # noqa: F811 def A_ ( _UpperCAmelCase , _UpperCAmelCase ): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS" , _UpperCAmelCase ) == "1": SCREAMING_SNAKE_CASE_: Tuple = 2 # New Code # SCREAMING_SNAKE_CASE_: List[str] = int(args.gradient_accumulation_steps ) # Initialize accelerator SCREAMING_SNAKE_CASE_: int = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_UpperCAmelCase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. 
Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE_: Tuple = config["lr"] SCREAMING_SNAKE_CASE_: List[str] = int(config["num_epochs"] ) SCREAMING_SNAKE_CASE_: List[str] = int(config["seed"] ) SCREAMING_SNAKE_CASE_: Optional[int] = int(config["batch_size"] ) SCREAMING_SNAKE_CASE_: str = evaluate.load("glue" , "mrpc" ) set_seed(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE_: Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_UpperCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE_: List[Any] = model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE_: Union[str, Any] = AdamW(params=model.parameters() , lr=_UpperCAmelCase ) # Instantiate scheduler SCREAMING_SNAKE_CASE_: str = get_linear_schedule_with_warmup( optimizer=_UpperCAmelCase , num_warmup_steps=1_00 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = accelerator.prepare( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Now we train the model for epoch in range(_UpperCAmelCase ): model.train() for step, batch in enumerate(_UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[Any] = model(**_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[Any] = output.loss accelerator.backward(_UpperCAmelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE_: Optional[Any] = model(**_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[Any] = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=_UpperCAmelCase , references=_UpperCAmelCase , ) SCREAMING_SNAKE_CASE_: List[str] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:" , _UpperCAmelCase ) def A_ ( ): SCREAMING_SNAKE_CASE_: str = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." 
"and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=_UpperCAmelCase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) SCREAMING_SNAKE_CASE_: List[Any] = parser.parse_args() SCREAMING_SNAKE_CASE_: Tuple = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(_UpperCAmelCase , _UpperCAmelCase ) if __name__ == "__main__": main()
671
1
from __future__ import annotations def A_ ( _UpperCAmelCase , _UpperCAmelCase ): if partitions <= 0: raise ValueError("partitions must be a positive number!" ) if partitions > number_of_bytes: raise ValueError("partitions can not > number_of_bytes!" ) SCREAMING_SNAKE_CASE_: Union[str, Any] = number_of_bytes // partitions SCREAMING_SNAKE_CASE_: int = [] for i in range(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Dict = i * bytes_per_partition + 1 SCREAMING_SNAKE_CASE_: List[Any] = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f"{start_bytes}-{end_bytes}" ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
671
from math import asin, atan, cos, radians, sin, sqrt, tan lowerCAmelCase : Union[str, Any] = 637_8137.0 lowerCAmelCase : int = 635_6752.31_4245 lowerCAmelCase : Union[str, Any] = 6378137 def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[Any] = (AXIS_A - AXIS_B) / AXIS_A SCREAMING_SNAKE_CASE_: str = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) ) SCREAMING_SNAKE_CASE_: Optional[int] = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) ) SCREAMING_SNAKE_CASE_: Any = radians(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Dict = radians(_UpperCAmelCase ) # Equation SCREAMING_SNAKE_CASE_: str = sin((phi_a - phi_a) / 2 ) SCREAMING_SNAKE_CASE_: List[Any] = sin((lambda_a - lambda_a) / 2 ) # Square both values sin_sq_phi *= sin_sq_phi sin_sq_lambda *= sin_sq_lambda SCREAMING_SNAKE_CASE_: Tuple = sqrt(sin_sq_phi + (cos(_UpperCAmelCase ) * cos(_UpperCAmelCase ) * sin_sq_lambda) ) return 2 * RADIUS * asin(_UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
671
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : str = { """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""], """processing_git""": ["""GitProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Any = [ """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GitForCausalLM""", """GitModel""", """GitPreTrainedModel""", """GitVisionModel""", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
671
import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # Initialise PyTorch model SCREAMING_SNAKE_CASE_: List[Any] = BertConfig.from_json_file(_UpperCAmelCase ) print(f"Building PyTorch model from configuration: {config}" ) SCREAMING_SNAKE_CASE_: Tuple = BertForPreTraining(_UpperCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_bert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}" ) torch.save(model.state_dict() , _UpperCAmelCase ) if __name__ == "__main__": lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--bert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained BERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) lowerCAmelCase : Optional[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
671
1
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCAmelCase : Optional[int] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""") lowerCAmelCase : List[Any] = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) lowerCAmelCase : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def A_ ( _UpperCAmelCase ): with open(_UpperCAmelCase , "rb" ) as f: SCREAMING_SNAKE_CASE_: Dict = Image.open(_UpperCAmelCase ) return im.convert("RGB" ) @dataclass class __lowercase : """simple docstring""" _UpperCAmelCase : Optional[str] = field( default=UpperCAmelCase_ , metadata={ '''help''': '''Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).''' } , ) _UpperCAmelCase : Optional[str] = field( default=UpperCAmelCase_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) _UpperCAmelCase : Optional[str] = field(default=UpperCAmelCase_ , metadata={'''help''': '''A folder containing the training data.'''} ) _UpperCAmelCase : Optional[str] = field(default=UpperCAmelCase_ , 
metadata={'''help''': '''A folder containing the validation data.'''} ) _UpperCAmelCase : Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) _UpperCAmelCase : Optional[int] = field( default=UpperCAmelCase_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) _UpperCAmelCase : Optional[int] = field( default=UpperCAmelCase_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( "You must specify either a dataset name from the hub or a train and/or validation directory.") @dataclass class __lowercase : """simple docstring""" _UpperCAmelCase : str = field( default='''google/vit-base-patch16-224-in21k''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , ) _UpperCAmelCase : Optional[str] = field( default=UpperCAmelCase_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(UpperCAmelCase_ )} , ) _UpperCAmelCase : Optional[str] = field( default=UpperCAmelCase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _UpperCAmelCase : Optional[str] = field( default=UpperCAmelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) _UpperCAmelCase : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) _UpperCAmelCase : str = field(default=UpperCAmelCase_ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) _UpperCAmelCase : bool = field( 
default=UpperCAmelCase_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) _UpperCAmelCase : bool = field( default=UpperCAmelCase_ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , ) def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Tuple = torch.stack([example["pixel_values"] for example in examples] ) SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([example["labels"] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def A_ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. SCREAMING_SNAKE_CASE_: Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_image_classification" , _UpperCAmelCase , _UpperCAmelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() SCREAMING_SNAKE_CASE_: Union[str, Any] = training_args.get_process_log_level() logger.setLevel(_UpperCAmelCase ) transformers.utils.logging.set_verbosity(_UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(f"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. SCREAMING_SNAKE_CASE_: Dict = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: SCREAMING_SNAKE_CASE_: Tuple = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. 
if data_args.dataset_name is not None: SCREAMING_SNAKE_CASE_: Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , ) else: SCREAMING_SNAKE_CASE_: int = {} if data_args.train_dir is not None: SCREAMING_SNAKE_CASE_: Dict = os.path.join(data_args.train_dir , "**" ) if data_args.validation_dir is not None: SCREAMING_SNAKE_CASE_: Optional[int] = os.path.join(data_args.validation_dir , "**" ) SCREAMING_SNAKE_CASE_: Optional[int] = load_dataset( "imagefolder" , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir , task="image-classification" , ) # If we don't have a validation split, split off a percentage of train as validation. SCREAMING_SNAKE_CASE_: int = None if "validation" in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _UpperCAmelCase ) and data_args.train_val_split > 0.0: SCREAMING_SNAKE_CASE_: Optional[Any] = dataset["train"].train_test_split(data_args.train_val_split ) SCREAMING_SNAKE_CASE_: List[str] = split["train"] SCREAMING_SNAKE_CASE_: Union[str, Any] = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. SCREAMING_SNAKE_CASE_: Any = dataset["train"].features["labels"].names SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = {}, {} for i, label in enumerate(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Any = str(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Tuple = label # Load the accuracy metric from the datasets package SCREAMING_SNAKE_CASE_: List[Any] = evaluate.load("accuracy" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. 
def compute_metrics(_UpperCAmelCase ): return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids ) SCREAMING_SNAKE_CASE_: str = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCAmelCase ) , labelaid=_UpperCAmelCase , idalabel=_UpperCAmelCase , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE_: List[str] = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) SCREAMING_SNAKE_CASE_: List[Any] = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Define torchvision transforms to be applied to each image. 
if "shortest_edge" in image_processor.size: SCREAMING_SNAKE_CASE_: Optional[int] = image_processor.size["shortest_edge"] else: SCREAMING_SNAKE_CASE_: Any = (image_processor.size["height"], image_processor.size["width"]) SCREAMING_SNAKE_CASE_: List[str] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std ) SCREAMING_SNAKE_CASE_: Any = Compose( [ RandomResizedCrop(_UpperCAmelCase ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) SCREAMING_SNAKE_CASE_: int = Compose( [ Resize(_UpperCAmelCase ), CenterCrop(_UpperCAmelCase ), ToTensor(), normalize, ] ) def train_transforms(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[Any] = [ _train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"] ] return example_batch def val_transforms(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: int = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: SCREAMING_SNAKE_CASE_: str = ( dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(_UpperCAmelCase ) if training_args.do_eval: if "validation" not in dataset: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: SCREAMING_SNAKE_CASE_: int = ( dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(_UpperCAmelCase ) # Initalize our trainer SCREAMING_SNAKE_CASE_: str = Trainer( model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , 
data_collator=_UpperCAmelCase , ) # Training if training_args.do_train: SCREAMING_SNAKE_CASE_: Union[str, Any] = None if training_args.resume_from_checkpoint is not None: SCREAMING_SNAKE_CASE_: int = training_args.resume_from_checkpoint elif last_checkpoint is not None: SCREAMING_SNAKE_CASE_: int = last_checkpoint SCREAMING_SNAKE_CASE_: Any = trainer.train(resume_from_checkpoint=_UpperCAmelCase ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: SCREAMING_SNAKE_CASE_: List[str] = trainer.evaluate() trainer.log_metrics("eval" , _UpperCAmelCase ) trainer.save_metrics("eval" , _UpperCAmelCase ) # Write model card and (optionally) push to hub SCREAMING_SNAKE_CASE_: Dict = { "finetuned_from": model_args.model_name_or_path, "tasks": "image-classification", "dataset": data_args.dataset_name, "tags": ["image-classification", "vision"], } if training_args.push_to_hub: trainer.push_to_hub(**_UpperCAmelCase ) else: trainer.create_model_card(**_UpperCAmelCase ) if __name__ == "__main__": main()
671
import math


def is_prime(number):
    """Return True if ``number`` is prime, else False.

    Uses 6k +/- 1 trial division: after handling 2, 3 and the trivial
    cases, every remaining prime candidate divisor has the form 6k +/- 1.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def A_(ratio=0.1):
    """Project Euler 58: return the side length of the square spiral at which
    the fraction of primes along both diagonals first drops below ``ratio``.

    A spiral with side length ``j`` has ``2 * j - 1`` numbers on its
    diagonals.  The next ring (side ``j + 2``) contributes four corners
    ``j*j + (j+1)*k`` for k = 1..4; the last corner is the odd square
    ``(j + 2)**2`` and is never prime, so only three corners are tested.

    BUG FIX: the obfuscated version called an undefined ``is_prime`` name and
    tested ``ratio`` itself instead of each corner value ``i``; the loop
    variables ``primes``/``j`` were also never bound.
    """
    primes = 3  # 3, 5, 7 — the prime diagonal values of the initial 3x3 spiral
    j = 3  # current spiral side length
    while primes / (2 * j - 1) >= ratio:
        # Test the three non-square corners of the ring with side j + 2.
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
1
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class __lowercase(UpperCAmelCase_):
    """Test suite for ``DPMSolverSinglestepScheduler``.

    NOTE(review): this file carries heavy obfuscation damage and cannot run
    as-is.  Observed problems, to be confirmed against the upstream diffusers
    test file before fixing:
    - the base class ``UpperCAmelCase_`` is undefined; ``SchedulerCommonTest``
      is imported but unused and is presumably the intended base;
    - every method is named ``_SCREAMING_SNAKE_CASE`` (and several have
      duplicate ``lowerCAmelCase__`` parameters — a SyntaxError), so only the
      last definition would survive and none would be discovered as tests;
    - many assignment targets were replaced by the throwaway name
      ``SCREAMING_SNAKE_CASE_`` while later lines still reference the original
      locals (``config``, ``kwargs``, ``sample``, ``residual``, ``scheduler``,
      ``new_scheduler``, ``output``, ``model``, ``result_mean``, ...), leaving
      them unbound;
    - ``lowerCAmelCase__`` appears where literals/arguments once stood;
    - ``torch.floataa`` at the end is presumably ``torch.float16`` (the fp16
      path calls ``.half()`` on the sample) — TODO confirm.
    """

    # Presumably ``scheduler_classes`` and ``forward_default_kwargs`` before
    # obfuscation; as written the second assignment overwrites the first.
    _UpperCAmelCase: List[str] = (DPMSolverSinglestepScheduler,)
    _UpperCAmelCase: Tuple = (('''num_inference_steps''', 25),)

    def _SCREAMING_SNAKE_CASE(self: List[str], **lowerCAmelCase__: Union[str, Any]):
        # Build the default scheduler config, overridable via keyword args.
        SCREAMING_SNAKE_CASE_: Optional[Any] = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }
        # NOTE(review): ``config`` is unbound — the dict above was presumably
        # assigned to it before obfuscation.
        config.update(**lowerCAmelCase__)
        return config

    def _SCREAMING_SNAKE_CASE(self: Any, lowerCAmelCase__: Union[str, Any]=0, **lowerCAmelCase__: List[str]):
        # Save/reload round-trip check: stepping a scheduler restored via
        # ``from_pretrained`` must produce identical outputs.
        SCREAMING_SNAKE_CASE_: Optional[Any] = dict(self.forward_default_kwargs)
        SCREAMING_SNAKE_CASE_: Dict = kwargs.pop("num_inference_steps", lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Any = self.dummy_sample
        SCREAMING_SNAKE_CASE_: List[Any] = 0.1 * sample
        SCREAMING_SNAKE_CASE_: Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            SCREAMING_SNAKE_CASE_: Any = self.get_scheduler_config(**lowerCAmelCase__)
            SCREAMING_SNAKE_CASE_: List[str] = scheduler_class(**lowerCAmelCase__)
            scheduler.set_timesteps(lowerCAmelCase__)
            # copy over dummy past residuals
            SCREAMING_SNAKE_CASE_: List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowerCAmelCase__)
                SCREAMING_SNAKE_CASE_: int = scheduler_class.from_pretrained(lowerCAmelCase__)
                new_scheduler.set_timesteps(lowerCAmelCase__)
                # copy over dummy past residuals
                SCREAMING_SNAKE_CASE_: Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
            SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: Union[str, Any] = sample, sample
            # Step both schedulers over solver_order + 1 timesteps and compare.
            for t in range(lowerCAmelCase__, time_step + scheduler.config.solver_order + 1):
                SCREAMING_SNAKE_CASE_: Dict = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
                SCREAMING_SNAKE_CASE_: Union[str, Any] = new_scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def _SCREAMING_SNAKE_CASE(self: int):
        # No-op placeholder.
        pass

    def _SCREAMING_SNAKE_CASE(self: List[str], lowerCAmelCase__: Union[str, Any]=0, **lowerCAmelCase__: Optional[int]):
        # Same save/reload round-trip, but stepping only once after restore.
        SCREAMING_SNAKE_CASE_: int = dict(self.forward_default_kwargs)
        SCREAMING_SNAKE_CASE_: Dict = kwargs.pop("num_inference_steps", lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Union[str, Any] = self.dummy_sample
        SCREAMING_SNAKE_CASE_: int = 0.1 * sample
        SCREAMING_SNAKE_CASE_: List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            SCREAMING_SNAKE_CASE_: Tuple = self.get_scheduler_config()
            SCREAMING_SNAKE_CASE_: Union[str, Any] = scheduler_class(**lowerCAmelCase__)
            scheduler.set_timesteps(lowerCAmelCase__)
            # copy over dummy past residuals (must be after setting timesteps)
            SCREAMING_SNAKE_CASE_: List[str] = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowerCAmelCase__)
                SCREAMING_SNAKE_CASE_: Any = scheduler_class.from_pretrained(lowerCAmelCase__)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(lowerCAmelCase__)
                # copy over dummy past residual (must be after setting timesteps)
                SCREAMING_SNAKE_CASE_: int = dummy_past_residuals[: new_scheduler.config.solver_order]
            SCREAMING_SNAKE_CASE_: int = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
            SCREAMING_SNAKE_CASE_: Optional[int] = new_scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def _SCREAMING_SNAKE_CASE(self: Tuple, lowerCAmelCase__: str=None, **lowerCAmelCase__: int):
        # Run a full 10-step denoising loop and return the final sample.
        if scheduler is None:
            SCREAMING_SNAKE_CASE_: Optional[int] = self.scheduler_classes[0]
            SCREAMING_SNAKE_CASE_: Optional[int] = self.get_scheduler_config(**lowerCAmelCase__)
            SCREAMING_SNAKE_CASE_: List[Any] = scheduler_class(**lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Optional[int] = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE_: Union[str, Any] = self.get_scheduler_config(**lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Optional[Any] = scheduler_class(**lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: int = 10
        SCREAMING_SNAKE_CASE_: List[str] = self.dummy_model()
        SCREAMING_SNAKE_CASE_: Dict = self.dummy_sample_deter
        scheduler.set_timesteps(lowerCAmelCase__)
        for i, t in enumerate(scheduler.timesteps):
            SCREAMING_SNAKE_CASE_: int = model(lowerCAmelCase__, lowerCAmelCase__)
            SCREAMING_SNAKE_CASE_: int = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample
        return sample

    def _SCREAMING_SNAKE_CASE(self: Dict):
        # 50-step loop skipping the first 3 timesteps; checks the mean
        # absolute value of the final sample against a reference constant.
        SCREAMING_SNAKE_CASE_: Dict = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        SCREAMING_SNAKE_CASE_: str = 50
        SCREAMING_SNAKE_CASE_: Dict = self.dummy_model()
        SCREAMING_SNAKE_CASE_: int = self.dummy_sample_deter
        scheduler.set_timesteps(lowerCAmelCase__)
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            SCREAMING_SNAKE_CASE_: Any = model(lowerCAmelCase__, lowerCAmelCase__)
            SCREAMING_SNAKE_CASE_: str = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample
        SCREAMING_SNAKE_CASE_: Dict = torch.mean(torch.abs(lowerCAmelCase__))
        assert abs(result_mean.item() - 0.2574) < 1E-3

    def _SCREAMING_SNAKE_CASE(self: str):
        # Exercise several num_train_timesteps settings.
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE(self: List[str]):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        SCREAMING_SNAKE_CASE_: str = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        SCREAMING_SNAKE_CASE_: List[Any] = self.full_loop(scheduler=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Optional[Any] = torch.mean(torch.abs(lowerCAmelCase__))
        assert abs(result_mean.item() - 0.2791) < 1E-3
        # Round-trip the config through sibling scheduler classes and back.
        SCREAMING_SNAKE_CASE_: Dict = DEISMultistepScheduler.from_config(scheduler.config)
        SCREAMING_SNAKE_CASE_: Optional[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config)
        SCREAMING_SNAKE_CASE_: Tuple = UniPCMultistepScheduler.from_config(scheduler.config)
        SCREAMING_SNAKE_CASE_: List[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        SCREAMING_SNAKE_CASE_: Optional[int] = self.full_loop(scheduler=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.mean(torch.abs(lowerCAmelCase__))
        assert abs(result_mean.item() - 0.2791) < 1E-3

    def _SCREAMING_SNAKE_CASE(self: Optional[Any]):
        # Sweep thresholding-related config combinations.
        self.check_over_configs(thresholding=lowerCAmelCase__)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=lowerCAmelCase__,
                            prediction_type=lowerCAmelCase__,
                            sample_max_value=lowerCAmelCase__,
                            algorithm_type="dpmsolver++",
                            solver_order=lowerCAmelCase__,
                            solver_type=lowerCAmelCase__,
                        )

    def _SCREAMING_SNAKE_CASE(self: List[Any]):
        # Sweep prediction types.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE(self: List[Any]):
        # Sweep algorithm/solver/order/prediction combinations and check
        # that a full loop never produces NaNs.
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=lowerCAmelCase__,
                            solver_type=lowerCAmelCase__,
                            prediction_type=lowerCAmelCase__,
                            algorithm_type=lowerCAmelCase__,
                        )
                        SCREAMING_SNAKE_CASE_: Optional[Any] = self.full_loop(
                            solver_order=lowerCAmelCase__,
                            solver_type=lowerCAmelCase__,
                            prediction_type=lowerCAmelCase__,
                            algorithm_type=lowerCAmelCase__,
                        )
                        assert not torch.isnan(lowerCAmelCase__).any(), "Samples have nan numbers"

    def _SCREAMING_SNAKE_CASE(self: List[Any]):
        # NOTE(review): presumably lower_order_final=True then =False before
        # obfuscation — ``lowerCAmelCase__`` is unbound here.
        self.check_over_configs(lower_order_final=lowerCAmelCase__)
        self.check_over_configs(lower_order_final=lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE(self: int):
        # lambda_min_clipped: unclipped and clipped settings.
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def _SCREAMING_SNAKE_CASE(self: Any):
        # variance_type settings.
        self.check_over_configs(variance_type=lowerCAmelCase__)
        self.check_over_configs(variance_type="learned_range")

    def _SCREAMING_SNAKE_CASE(self: Optional[Any]):
        # Sweep inference-step counts through the forward round-trip check.
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=lowerCAmelCase__, time_step=0)

    def _SCREAMING_SNAKE_CASE(self: List[Any]):
        # Default full loop; mean |sample| against a reference constant.
        SCREAMING_SNAKE_CASE_: Optional[Any] = self.full_loop()
        SCREAMING_SNAKE_CASE_: List[Any] = torch.mean(torch.abs(lowerCAmelCase__))
        assert abs(result_mean.item() - 0.2791) < 1E-3

    def _SCREAMING_SNAKE_CASE(self: Union[str, Any]):
        # Full loop with Karras sigmas.
        SCREAMING_SNAKE_CASE_: Dict = self.full_loop(use_karras_sigmas=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Optional[Any] = torch.mean(torch.abs(lowerCAmelCase__))
        assert abs(result_mean.item() - 0.2248) < 1E-3

    def _SCREAMING_SNAKE_CASE(self: List[str]):
        # Full loop under v-prediction.
        SCREAMING_SNAKE_CASE_: List[Any] = self.full_loop(prediction_type="v_prediction")
        SCREAMING_SNAKE_CASE_: int = torch.mean(torch.abs(lowerCAmelCase__))
        assert abs(result_mean.item() - 0.1453) < 1E-3

    def _SCREAMING_SNAKE_CASE(self: Optional[Any]):
        # Full loop under v-prediction with Karras sigmas.
        SCREAMING_SNAKE_CASE_: Dict = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Tuple = torch.mean(torch.abs(lowerCAmelCase__))
        assert abs(result_mean.item() - 0.0649) < 1E-3

    def _SCREAMING_SNAKE_CASE(self: Dict):
        # fp16 path: a half-precision input sample should stay half precision.
        SCREAMING_SNAKE_CASE_: str = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE_: List[Any] = self.get_scheduler_config(thresholding=lowerCAmelCase__, dynamic_thresholding_ratio=0)
        SCREAMING_SNAKE_CASE_: Tuple = scheduler_class(**lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: str = 10
        SCREAMING_SNAKE_CASE_: Optional[int] = self.dummy_model()
        SCREAMING_SNAKE_CASE_: int = self.dummy_sample_deter.half()
        scheduler.set_timesteps(lowerCAmelCase__)
        for i, t in enumerate(scheduler.timesteps):
            SCREAMING_SNAKE_CASE_: List[Any] = model(lowerCAmelCase__, lowerCAmelCase__)
            SCREAMING_SNAKE_CASE_: Optional[int] = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample
        # NOTE(review): ``torch.floataa`` does not exist — presumably
        # ``torch.float16`` (mangled "16" -> "aa"); confirm before fixing.
        assert sample.dtype == torch.floataa
671
import re


def split_input(str_):
    """Split ``str_`` on punctuation, then each chunk on whitespace.

    Returns a list of word lists, one inner list per punctuation-free chunk.
    """
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_):
    """Return ``str_`` in PascalCase (each word capitalized, no separators)."""
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text, upper, separator):
    """Join the words of each chunk of ``text`` with ``separator``.

    The result is upper-cased when ``upper`` is truthy, lower-cased otherwise.
    Returns "not valid string" for input that yields no words.
    Note: words from *different* punctuation-separated chunks are concatenated
    without a separator (behavior preserved from the original implementation).
    """
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [separator.join([char.upper() for char in sub_str]) for sub_str in string_split]
            )
        else:
            res_str = "".join(
                [separator.join([char.lower() for char in sub_str]) for sub_str in string_split]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text):
    """Return ``text`` in PascalCase."""
    return to_simple_case(text)


def to_camel_case(text):
    """Return ``text`` in camelCase, or "not valid string" for empty input."""
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text, upper):
    """Return ``text`` in snake_case (SCREAMING_SNAKE_CASE when ``upper``)."""
    return to_complex_case(text, upper, "_")


def to_kebab_case(text, upper):
    """Return ``text`` in kebab-case (upper-cased when ``upper``)."""
    return to_complex_case(text, upper, "-")


# BUG FIX: in the obfuscated original every function was named ``A_`` (with
# duplicate parameter names — a SyntaxError) while bodies called the helper
# names restored above.  Keep ``A_`` bound to the last definition for
# backward compatibility.
A_ = to_kebab_case


if __name__ == "__main__":
    __import__("doctest").testmod()
671
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# BUG FIX: the lazy-import structure must be a single dict named
# ``_import_structure`` (it is passed to ``_LazyModule`` below); the
# obfuscated version bound the base dict and the optional submodule lists to
# throwaway names, leaving ``_import_structure`` undefined.
_import_structure = {
    "configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]
}

# Vision-dependent submodules (names grounded by the TYPE_CHECKING imports below).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    # BUG FIX: replace the module with the lazy proxy (the obfuscated version
    # assigned the ``_LazyModule`` to a throwaway name, so lazy loading never
    # took effect).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


# BUG FIX: the logger must be named ``logger`` — the class body calls
# ``logger.info`` but the obfuscated version bound it to a throwaway name.
logger = logging.get_logger(__name__)


class __lowercase(PretrainedConfig):
    """Configuration for a UperNet semantic-segmentation model.

    BUG FIX: the obfuscated version inherited from an undefined
    ``UpperCAmelCase_`` (``PretrainedConfig`` is the imported base), declared
    duplicate ``lowerCAmelCase__`` parameters (a SyntaxError), dropped the
    ``model_type`` class attribute that ``to_dict`` reads, and lost every
    ``self.<attr> = ...`` / ``output[...] = ...`` assignment target.
    Parameter names are restored from the right-hand sides that survived
    obfuscation.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],  # default preserved from the original signature
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        """Create the config.

        backbone_config: backbone ``PretrainedConfig`` or its dict form;
            defaults to a ResNet backbone exposing stages 1-4.
        The remaining arguments configure the UperNet decode head and the
        optional FCN auxiliary head; ``loss_ignore_index`` is the label value
        excluded from the loss.
        """
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Rebuild the typed backbone config from its serialized dict form.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, nesting the backbone config's dict."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
671
1
import math


def insertion_sort(array, start=0, end=0):
    """In-place insertion sort of ``array[start:end]``; returns the array.

    ``end`` of 0 means "to the end of the array".
    """
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements one slot right until the value fits.
        # BUG FIX: the obfuscation destroyed these element writes.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):
    """Sift ``array[index]`` down so the subtree rooted there is a max-heap."""
    largest = index
    left_index = 2 * index + 1  # left child
    right_index = 2 * index + 2  # right child
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        # BUG FIX: restore the swap destroyed by obfuscation.
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    """In-place heap sort; returns the array."""
    n = len(array)
    # Build a max-heap bottom-up.
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    # Repeatedly move the max to the end and restore the heap.
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array, first_index, middle_index, last_index):
    """Return the median of the three indexed values (pivot selection)."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    """Hoare-style partition of ``array[low:high]`` around ``pivot``.

    Returns the split index; elements left of it are <= pivot region bound.
    """
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        # BUG FIX: restore the swap destroyed by obfuscation.
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    """Introsort entry point: quicksort with a heap-sort fallback at depth
    limit and insertion sort for slices of at most 16 elements."""
    if len(array) == 0:
        return array
    # Depth limit 2*ceil(log2(n)), per standard introsort.
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    """Recursive introsort core over ``array[start:end]``."""
    while end - start > size_threshold:
        if max_depth == 0:
            # Quicksort recursion too deep: fall back to heap sort.
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
671
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class __lowercase(unittest.TestCase):
    """Checks that an optimizer wrapped by ``Accelerator.prepare`` survives a
    pickle round-trip."""

    # NOTE(review): presumably a ``test_*`` method before obfuscation; the
    # name is kept unchanged to avoid altering the class interface.
    def _SCREAMING_SNAKE_CASE(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        # BUG FIX: prepare and pickle the *optimizer* — the obfuscated version
        # passed an undefined name to both calls and never bound the locals.
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        # Reset global accelerator state so later tests start clean.
        AcceleratorState._reset_state()
671
1
from jiwer import compute_measures

import datasets


# BUG FIX: these module constants must be named ``_CITATION`` /
# ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION`` — they are referenced by the
# decorator and by ``_info`` below, but the obfuscated version bound them to
# throwaway names.
_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __lowercase(datasets.Metric):
    """Word error rate metric backed by jiwer's ``compute_measures``."""

    # BUG FIX: ``datasets.Metric`` dispatches to ``_info`` / ``_compute`` by
    # name; the obfuscated version named both methods ``_SCREAMING_SNAKE_CASE``
    # (so the second shadowed the first) and gave ``_compute`` duplicate
    # parameter names — a SyntaxError.
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        """Return the corpus-level WER.

        With ``concatenate_texts`` the pairs are scored in one jiwer call;
        otherwise the error/hit counts are accumulated pair by pair.
        """
        if concatenate_texts:
            # jiwer's argument order is (truth, hypothesis).
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
from itertools import count


def solution(min_block_length=50):
    """Project Euler 115: least row length ``n`` whose fill-count exceeds
    one million, for red blocks of minimum length ``min_block_length``.

    ``fill_count_functions[n]`` counts the ways to fill a row of length ``n``
    with black squares and red blocks of length >= ``min_block_length``,
    any two blocks separated by at least one black square.  Each length adds
    1 (the all-black row) plus, for every block length and start offset, the
    fill count of the remainder after the block and its separating square.

    BUG FIX: the obfuscated version never bound ``fill_count_functions`` and
    referenced ``min_block_length`` / ``solution`` without defining them.
    """
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            # The block placed flush against the end of the row.
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n


# Backward-compatible alias for the obfuscated public name.
A_ = solution


if __name__ == "__main__":
    print(f"{solution() = }")
671
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# BUG FIX: the lazy-import structure must be a single dict named
# ``_import_structure`` (it is passed to ``_LazyModule`` below); the
# obfuscated version bound the base dict and the optional submodule lists to
# throwaway names, leaving ``_import_structure`` undefined.
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

# Fast tokenizer, only when the tokenizers backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    # BUG FIX: replace the module with the lazy proxy (the obfuscated version
    # assigned the ``_LazyModule`` to a throwaway name).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
def A_(number: int) -> int:
    """Return the largest integer obtainable by deleting exactly one digit
    from the decimal representation of ``abs(number)``.

    :param number: any integer (the sign is ignored)
    :raises TypeError: if ``number`` is not an integer
    :raises ValueError: for single-digit inputs (deleting the only digit
        leaves an empty string)

    >>> A_(2736)
    736
    >>> A_(-123)
    23
    """
    if not isinstance(number, int):
        raise TypeError("only integers accepted as input")
    digits = str(abs(number))
    # One candidate per position: the number with that digit removed.
    candidates = (digits[:index] + digits[index + 1 :] for index in range(len(digits)))
    return max(int(candidate) for candidate in candidates)


if __name__ == "__main__":
    __import__("doctest").testmod()
671
1
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) LXMERT tokenizer, a WordPiece tokenizer with the
    standard BERT-style ``[CLS] A [SEP] B [SEP]`` sequence format."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the options passed to this constructor.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` (or ``[CLS] A [SEP] B [SEP]``) from one or
        two token-id sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return the token-type mask: 0 for the first sequence (incl. CLS/SEP),
        1 for the second sequence and its trailing SEP."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files to ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
671
from collections.abc import Iterator
from typing import Any


class Node:
    """A single node of a circular singly linked list."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next: "Node | None" = None


class CircularLinkedList:
    """Circular singly linked list with ``head`` and ``tail`` pointers.

    Invariant: in a non-empty list ``tail.next is head``.
    """

    def __init__(self) -> None:
        self.head: "Node | None" = None
        self.tail: "Node | None" = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:  # wrapped around: one full lap done
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        """Append ``data`` at the end of the list."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend ``data`` at the front of the list."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` before position ``index`` (0 <= index <= len)."""
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points to itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            # after insertion the list is one longer, so appending at the
            # old length lands exactly at the new last index
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        """Remove and return the head element."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        """Remove and return the tail element."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the element at ``index`` (0 <= index < len)."""
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.tail.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            # Decide whether we are removing the tail BEFORE unlinking:
            # unlinking shrinks len(self), which would make the
            # `index == len(self) - 1` comparison miss the tail and leave
            # self.tail dangling on a detached node.
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        """Return True when the list holds no elements."""
        return len(self) == 0


def A_() -> None:
    """Exercise CircularLinkedList; raises AssertionError on any failure."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
1
import argparse

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    CLIPTokenizer,
    CLIPTokenizerFast,
    VideoMAEImageProcessor,
    XCLIPConfig,
    XCLIPModel,
    XCLIPProcessor,
    XCLIPTextConfig,
    XCLIPVisionConfig,
)


def get_xclip_config(model_name, num_frames):
    """Build an XCLIPConfig matching the named original X-CLIP checkpoint."""
    text_config = XCLIPTextConfig()

    # derive patch size from model name (two digits after "patch")
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config


def rename_key(name):
    """Map an original X-CLIP state-dict key onto the HF naming scheme."""
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name


def convert_state_dict(orig_state_dict, config):
    """Rename all keys and split fused in_proj qkv weights into q/k/v."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[
                            :dim
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[
                            -dim:
                        ]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                        dim : dim * 2, :
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                        dim : dim * 2
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            # these projections are stored transposed in the original checkpoint
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict


def prepare_video(num_frames):
    """Download and return the 'eating spaghetti' test clip as a list of frames."""
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    else:
        raise ValueError(f"unsupported num_frames: {num_frames}")
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)


def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original X-CLIP checkpoint to the HF format, verify its
    predictions on a test video, and optionally save/push the result."""
    model_to_url = {
        # fully supervised kinetics-400 checkpoints
        "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
        "xclip-base-patch32-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
        ),
        "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
        "xclip-base-patch16-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
        ),
        "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
        "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
        # fully supervised kinetics-600 checkpoints
        "xclip-base-patch16-kinetics-600": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
        ),
        "xclip-base-patch16-kinetics-600-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
        ),
        "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
        # few shot
        "xclip-base-patch16-hmdb-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
        ),
        "xclip-base-patch16-hmdb-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
        ),
        "xclip-base-patch16-hmdb-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
        ),
        "xclip-base-patch16-hmdb-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
        ),
        "xclip-base-patch16-ucf-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
        ),
        "xclip-base-patch16-ucf-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
        ),
        "xclip-base-patch16-ucf-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
        ),
        "xclip-base-patch16-ucf-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
        ),
        # zero shot
        "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
    }

    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    # position_ids are buffers recreated at init; nothing else may be missing
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs against probabilities recorded from the original models
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="xclip-base-patch32",
        type=str,
        help="Name of the model.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
671
from collections import defaultdict
from math import ceil, sqrt


def A_(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Project Euler 174: count the tile totals t <= t_limit that form a
    hollow square lamina in between 1 and ``n_limit`` distinct ways.

    A lamina uses t = outer**2 - hole**2 tiles, where the hole and the
    outer square share parity and the hole is at least 1x1.

    >>> A_(50)
    11
    """
    # count[t] = number of (outer, hole) pairs producing exactly t tiles
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # smallest hole keeping the tile count within t_limit
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # hole and outer square must share parity for a symmetric lamina
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{A_() = }")
671
1
import faiss  # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy  # noqa: F401 # Here to have a nice missing dependency error message early on
import requests  # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn  # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm  # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve  # From: mauve-text

import datasets


_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
  title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
  author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
  booktitle = {NeurIPS},
  year      = {2021}
}
"""

_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.

MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.

For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).

This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""

_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each predictions
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
    num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
    kmeans_max_iter: maximum number of k-means iterations. Default 500
    featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
    max_text_length: maximum number of tokens to consider. Default 1024
    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
    mauve_scaling_factor: "c" from the paper. Default 5.
    verbose: If True (default), print running time updates
    seed: random seed to initialize k-means cluster assignments.
Returns:
    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
    q_hist: same as above, but with q_text.
Examples:

    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
    >>> import datasets
    >>> mauve = datasets.load_metric('mauve')
    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
    >>> print(out.mauve) # doctest: +SKIP
    1.0
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    """Thin `datasets.Metric` wrapper around the official `mauve-text` package."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        # Delegate the actual computation to the reference implementation.
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
671
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Lazy-import table for the XLM family: maps submodule name -> public names
# it exports. Consumed by `_LazyModule` at the bottom so heavy backends
# (torch / TF) are only imported on first attribute access.
# NOTE(review): mangled — each assignment below targets a shared placeholder
# name, and the final `_LazyModule(...)` call references `_import_structure`,
# which is never defined here; originally the torch/TF lists were inserted
# into that dict. Restore the original names before relying on this module.
lowerCAmelCase : str = {
    """configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
    """tokenization_xlm""": ["""XLMTokenizer"""],
}

# PyTorch model exports — registered only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : Dict = [
        """XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """XLMForMultipleChoice""",
        """XLMForQuestionAnswering""",
        """XLMForQuestionAnsweringSimple""",
        """XLMForSequenceClassification""",
        """XLMForTokenClassification""",
        """XLMModel""",
        """XLMPreTrainedModel""",
        """XLMWithLMHeadModel""",
    ]

# TensorFlow model exports — registered only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : List[str] = [
        """TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFXLMForMultipleChoice""",
        """TFXLMForQuestionAnsweringSimple""",
        """TFXLMForSequenceClassification""",
        """TFXLMForTokenClassification""",
        """TFXLMMainLayer""",
        """TFXLMModel""",
        """TFXLMPreTrainedModel""",
        """TFXLMWithLMHeadModel""",
    ]

# Static imports for type checkers only; at runtime the lazy proxy is used.
if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy that imports submodules
    # on demand.
    lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
671
1
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


# NOTE(review): identifiers were mechanically mangled — the class name, its
# base (presumably `ProcessorMixin`, imported above), the three class
# attributes (presumably `attributes` / `image_processor_class` /
# `tokenizer_class`), the method names, and the parameters. Every parameter
# of `__init__` and `__call__` shares one placeholder name, which is a
# SyntaxError (duplicate argument); the bodies reference the original
# parameter names (`images`, `text`, ...) that no longer exist. The original
# names must be restored before this can run.
class __lowercase ( UpperCAmelCase_ ):
    """Pix2Struct processor: bundles a Pix2Struct image processor with a T5
    tokenizer behind one `__call__`, routing text to the tokenizer and
    images to the image processor."""
    _UpperCAmelCase : int = ['''image_processor''', '''tokenizer''']
    _UpperCAmelCase : Any = '''Pix2StructImageProcessor'''
    _UpperCAmelCase : int = ('''T5Tokenizer''', '''T5TokenizerFast''')

    def __init__( self : Optional[int] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int]):
        # Flag set before delegating to the mixin constructor.
        SCREAMING_SNAKE_CASE_: str = False
        super().__init__(lowerCAmelCase__ , lowerCAmelCase__)

    def __call__( self : int , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[int] = 2048 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , **lowerCAmelCase__ : str , ):
        # At least one modality is required.
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        # Text-only, non-VQA: plain tokenization, returned as-is.
        if images is None and not self.image_processor.is_vqa:
            SCREAMING_SNAKE_CASE_: Tuple = self.tokenizer
            SCREAMING_SNAKE_CASE_: Dict = self.tokenizer(
                text=lowerCAmelCase__ ,
                add_special_tokens=lowerCAmelCase__ ,
                padding=lowerCAmelCase__ ,
                truncation=lowerCAmelCase__ ,
                max_length=lowerCAmelCase__ ,
                stride=lowerCAmelCase__ ,
                pad_to_multiple_of=lowerCAmelCase__ ,
                return_attention_mask=lowerCAmelCase__ ,
                return_overflowing_tokens=lowerCAmelCase__ ,
                return_special_tokens_mask=lowerCAmelCase__ ,
                return_offsets_mapping=lowerCAmelCase__ ,
                return_token_type_ids=lowerCAmelCase__ ,
                return_length=lowerCAmelCase__ ,
                verbose=lowerCAmelCase__ ,
                return_tensors=lowerCAmelCase__ ,
                **lowerCAmelCase__ , )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            SCREAMING_SNAKE_CASE_: Optional[Any] = self.image_processor(
                lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , max_patches=lowerCAmelCase__ , **lowerCAmelCase__)
        else:
            # add pixel_values and bbox
            # VQA mode renders the question into the image header.
            SCREAMING_SNAKE_CASE_: Any = self.image_processor(
                lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ , **lowerCAmelCase__)

        # Non-VQA with both modalities: tokenize the text, then strip
        # `attention_mask` / `input_ids` out of the text encoding
        # (originally these were re-keyed — presumably as decoder inputs;
        # confirm against the upstream Pix2Struct processor).
        if text is not None and not self.image_processor.is_vqa:
            SCREAMING_SNAKE_CASE_: int = self.tokenizer(
                text=lowerCAmelCase__ ,
                add_special_tokens=lowerCAmelCase__ ,
                padding=lowerCAmelCase__ ,
                truncation=lowerCAmelCase__ ,
                max_length=lowerCAmelCase__ ,
                stride=lowerCAmelCase__ ,
                pad_to_multiple_of=lowerCAmelCase__ ,
                return_attention_mask=lowerCAmelCase__ ,
                return_overflowing_tokens=lowerCAmelCase__ ,
                return_special_tokens_mask=lowerCAmelCase__ ,
                return_offsets_mapping=lowerCAmelCase__ ,
                return_token_type_ids=lowerCAmelCase__ ,
                return_length=lowerCAmelCase__ ,
                verbose=lowerCAmelCase__ ,
                return_tensors=lowerCAmelCase__ ,
                **lowerCAmelCase__ , )
            if "attention_mask" in text_encoding:
                SCREAMING_SNAKE_CASE_: List[str] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                SCREAMING_SNAKE_CASE_: List[Any] = text_encoding.pop("input_ids")
        else:
            SCREAMING_SNAKE_CASE_: Union[str, Any] = None
        # Merge any remaining text fields into the image-processor output.
        if text_encoding is not None:
            encoding_image_processor.update(lowerCAmelCase__)
        return encoding_image_processor

    # Forwarded to the tokenizer's batch_decode.
    def _SCREAMING_SNAKE_CASE ( self : List[str] , *lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : Dict):
        return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__)

    # Forwarded to the tokenizer's decode.
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : Optional[Any]):
        return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__)

    # Union of tokenizer and image-processor input names, deduplicated
    # while preserving order via dict.fromkeys.
    @property
    def _SCREAMING_SNAKE_CASE ( self : str):
        SCREAMING_SNAKE_CASE_: Union[str, Any] = self.tokenizer.model_input_names
        SCREAMING_SNAKE_CASE_: int = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
671
from collections import deque

# Undirected demo graph (adjacency lists) exercised by the __main__ block.
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Return one shortest path from `start` to `goal` as a list of nodes.

    Breadth-first search over partial paths; returns `[start]` when
    start == goal and `[]` when `goal` is unreachable.
    (Restored from mangled code whose two defs were both named `A_` with
    duplicate parameter names — a SyntaxError — while `__main__` calls
    `bfs_shortest_path` / `bfs_shortest_path_distance`.)
    """
    if start == goal:
        return [start]
    explored = set()
    # deque gives O(1) pops from the left; the previous list.pop(0) was O(n).
    queue = deque([[start]])
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node in explored:
            continue
        # Extend the current path by each unexplored neighbour.
        for neighbour in graph[node]:
            new_path = [*path, neighbour]
            if neighbour == goal:
                return new_path
            queue.append(new_path)
        explored.add(node)
    # No path between the two nodes.
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest start->target path.

    Returns 0 when start == target and -1 when either endpoint is missing,
    the graph is empty, or `target` is unreachable.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = deque([start])
    # Bug fix: `set(start)` built a set of *characters* for multi-character
    # node labels; `{start}` seeds the visited set with the node itself.
    visited = {start}
    # Distances from `start`; target starts at -1 (unreached sentinel).
    dist = {start: 0, target: -1}
    while queue:
        node = queue.popleft()
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
671
1
from math import asin, atan, cos, radians, sin, sqrt, tan

# WGS-84 ellipsoid constants (metres).
AXIS_A = 6378137.0  # semi-major axis a
AXIS_B = 6356752.314245  # semi-minor axis b
RADIUS = 6378137  # radius used by the haversine formula


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two (lat, lon) points in degrees.

    Haversine formula applied to latitudes reduced with the WGS-84
    flattening, so results track the ellipsoid better than a plain
    spherical haversine.

    Restored from mangled code: constants and every local had been renamed
    to shared placeholders, the body referenced undefined names, the
    parameter list repeated one name (a SyntaxError), and `phi_a - phi_a`
    degenerated to zero.

    >>> round(haversine_distance(0.0, 0.0, 0.0, 0.0), 3)
    0.0
    """
    # Flattening f = (a - b) / a of the ellipsoid.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Reduced (parametric) latitudes.
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Haversine equation.
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values.
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    """Structural type for audio filters: anything with per-sample process().

    NOTE(review): the mangled original inherited an undefined placeholder
    base while importing `Protocol` unused — `Protocol` is the evident
    intent. (The mid-file `from __future__ import annotations`, a
    SyntaxError at that position, was dropped.)
    """

    def process(self, sample: float) -> float:
        """Calculate y[n] from input sample x[n]; the null filter returns 0."""
        return 0.0


def get_bounds(
    fft_results: np.ndarray, samplerate: int
) -> tuple[int | float, int | float]:
    """Return (lowest, highest) dB bounds over the positive-frequency half,
    clamped to at most -20 / at least +20 so a flat response still plots."""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's magnitude response (dB) from its impulse response.

    Fixes vs the mangled original: locals had collapsed to one shared name,
    the comprehension processed the wrong variable, and the dB conversion
    called the nonexistent `np.logaa` instead of `np.log10`.
    """
    size = 512
    # Unit impulse -> the filter's impulse response.
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 Hz up to the Nyquist frequency.
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds.
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's phase response (radians) from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 Hz up to the Nyquist frequency.
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
671
1
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


# NOTE(review): identifiers were mechanically mangled — the class name, its
# base (presumably `ProcessorMixin`, imported above), the class attributes
# (presumably `attributes` / `image_processor_class` / `tokenizer_class`),
# method names, and parameters. Parameter lists repeat a single placeholder
# name (a SyntaxError), and bodies reference the original names
# (`images`, `text`, `encoding`, `save_directory`, ...) that no longer
# exist. The original names must be restored before this can run.
class __lowercase ( UpperCAmelCase_ ):
    """InstructBLIP processor: bundles a BLIP image processor, a language
    tokenizer, and a separate QFormer tokenizer; `__call__` tokenizes text
    with both tokenizers and merges everything into one `BatchFeature`.
    Save/load round-trips the QFormer tokenizer via a `qformer_tokenizer`
    subfolder."""
    _UpperCAmelCase : int = ['''image_processor''', '''tokenizer''']
    _UpperCAmelCase : Any = '''BlipImageProcessor'''
    _UpperCAmelCase : int = '''AutoTokenizer'''

    def __init__( self : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str]):
        super().__init__(lowerCAmelCase__ , lowerCAmelCase__)
        # add QFormer tokenizer
        SCREAMING_SNAKE_CASE_: Optional[Any] = qformer_tokenizer

    def __call__( self : Any , lowerCAmelCase__ : ImageInput = None , lowerCAmelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , **lowerCAmelCase__ : int , ):
        # At least one modality is required.
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")
        # Accumulator for tokenizer / image-processor outputs.
        SCREAMING_SNAKE_CASE_: Union[str, Any] = BatchFeature()
        if text is not None:
            # Primary (language-model) tokenization.
            SCREAMING_SNAKE_CASE_: Union[str, Any] = self.tokenizer(
                text=lowerCAmelCase__ ,
                add_special_tokens=lowerCAmelCase__ ,
                padding=lowerCAmelCase__ ,
                truncation=lowerCAmelCase__ ,
                max_length=lowerCAmelCase__ ,
                stride=lowerCAmelCase__ ,
                pad_to_multiple_of=lowerCAmelCase__ ,
                return_attention_mask=lowerCAmelCase__ ,
                return_overflowing_tokens=lowerCAmelCase__ ,
                return_special_tokens_mask=lowerCAmelCase__ ,
                return_offsets_mapping=lowerCAmelCase__ ,
                return_token_type_ids=lowerCAmelCase__ ,
                return_length=lowerCAmelCase__ ,
                verbose=lowerCAmelCase__ ,
                return_tensors=lowerCAmelCase__ ,
                **lowerCAmelCase__ , )
            encoding.update(lowerCAmelCase__)
            # Second tokenization with the QFormer tokenizer; its
            # input_ids / attention_mask are popped out of its encoding
            # (originally re-keyed — presumably as qformer_* fields;
            # confirm against the upstream InstructBLIP processor).
            SCREAMING_SNAKE_CASE_: Optional[Any] = self.qformer_tokenizer(
                text=lowerCAmelCase__ ,
                add_special_tokens=lowerCAmelCase__ ,
                padding=lowerCAmelCase__ ,
                truncation=lowerCAmelCase__ ,
                max_length=lowerCAmelCase__ ,
                stride=lowerCAmelCase__ ,
                pad_to_multiple_of=lowerCAmelCase__ ,
                return_attention_mask=lowerCAmelCase__ ,
                return_overflowing_tokens=lowerCAmelCase__ ,
                return_special_tokens_mask=lowerCAmelCase__ ,
                return_offsets_mapping=lowerCAmelCase__ ,
                return_token_type_ids=lowerCAmelCase__ ,
                return_length=lowerCAmelCase__ ,
                verbose=lowerCAmelCase__ ,
                return_tensors=lowerCAmelCase__ ,
                **lowerCAmelCase__ , )
            SCREAMING_SNAKE_CASE_: Optional[Any] = qformer_text_encoding.pop("input_ids")
            SCREAMING_SNAKE_CASE_: List[Any] = qformer_text_encoding.pop("attention_mask")
        if images is not None:
            # Pixel features from the BLIP image processor.
            SCREAMING_SNAKE_CASE_: Optional[int] = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__)
            encoding.update(lowerCAmelCase__)
        return encoding

    # Forwarded to the tokenizer's batch_decode.
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , *lowerCAmelCase__ : List[str] , **lowerCAmelCase__ : Optional[Any]):
        return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__)

    # Forwarded to the tokenizer's decode.
    def _SCREAMING_SNAKE_CASE ( self : Any , *lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : str):
        return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
        # Union of tokenizer and image-processor input names, deduplicated
        # while preserving order via dict.fromkeys.
        SCREAMING_SNAKE_CASE_: Any = self.tokenizer.model_input_names
        SCREAMING_SNAKE_CASE_: List[str] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # Saves the QFormer tokenizer into a subfolder, then defers to the
    # mixin's save_pretrained for everything else.
    def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : List[Any]):
        if os.path.isfile(lowerCAmelCase__):
            raise ValueError(F"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[Any] = os.path.join(lowerCAmelCase__ , "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(lowerCAmelCase__)
        return super().save_pretrained(lowerCAmelCase__ , **lowerCAmelCase__)

    # Loads the QFormer tokenizer from its subfolder and appends it to the
    # mixin-resolved constructor arguments.
    @classmethod
    def _SCREAMING_SNAKE_CASE ( cls : List[Any] , lowerCAmelCase__ : Dict , **lowerCAmelCase__ : List[str]):
        SCREAMING_SNAKE_CASE_: Dict = AutoTokenizer.from_pretrained(lowerCAmelCase__ , subfolder="qformer_tokenizer")
        SCREAMING_SNAKE_CASE_: Any = cls._get_arguments_from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__)
        args.append(lowerCAmelCase__)
        return cls(*lowerCAmelCase__)
671
from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """Project Euler 85: area of the grid whose rectangle count is closest
    to `target`.

    An a x b grid contains T(a) * T(b) axis-aligned rectangles, where
    T(n) = n(n + 1) / 2 is the n-th triangle number. For each candidate a,
    b is estimated by solving T(b) ~= target / T(a) with the quadratic
    formula, and both bracketing integers are tested.

    Restored from mangled code in which every local shared one placeholder
    name, so the body referenced undefined names (`triangle_numbers`,
    `target`, ...); `__main__` prints `solution()`, grounding the name.
    (The mid-file `from __future__ import annotations`, a SyntaxError at
    that position, was dropped — nothing here needs it.)

    >>> solution(2000000)
    2772
    """
    # Precompute triangle numbers: triangle_numbers[n] == T(n).
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
671
1
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    # Flag read by the skipIf decorators below when torch is absent.
    lowerCAmelCase : Tuple = False

if is_vision_available():
    from PIL import Image

    from transformers import PixaStructImageProcessor


# NOTE(review): identifiers in this file were mechanically mangled — all
# three classes share one placeholder name (each shadows the previous),
# methods and parameters share placeholders (repeated parameter names are a
# SyntaxError), and bodies reference names that are no longer defined
# (`size`, `parent`, `PixaStructImageProcessingTester`, `UpperCAmelCase_` —
# presumably `ImageProcessingSavingTestMixin`, imported above). The original
# names must be restored before these tests can run.
class __lowercase ( unittest.TestCase ):
    """Config holder that fabricates image-processor kwargs and inputs for
    the Pix2Struct image-processor tests below."""

    def __init__( self : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict=7 , lowerCAmelCase__ : Dict=3 , lowerCAmelCase__ : int=18 , lowerCAmelCase__ : Tuple=30 , lowerCAmelCase__ : Optional[int]=400 , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : int=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Optional[int]=None , ):
        SCREAMING_SNAKE_CASE_: str = size if size is not None else {"height": 20, "width": 20}
        SCREAMING_SNAKE_CASE_: Tuple = parent
        SCREAMING_SNAKE_CASE_: str = batch_size
        SCREAMING_SNAKE_CASE_: Optional[Any] = num_channels
        SCREAMING_SNAKE_CASE_: Any = image_size
        SCREAMING_SNAKE_CASE_: Optional[int] = min_resolution
        SCREAMING_SNAKE_CASE_: Optional[Any] = max_resolution
        SCREAMING_SNAKE_CASE_: Union[str, Any] = size
        SCREAMING_SNAKE_CASE_: Any = do_normalize
        SCREAMING_SNAKE_CASE_: Any = do_convert_rgb
        # max_patches values swept by the shape tests below.
        SCREAMING_SNAKE_CASE_: Dict = [512, 1024, 2048, 4096]
        SCREAMING_SNAKE_CASE_: Tuple = patch_size if patch_size is not None else {"height": 16, "width": 16}

    # Kwargs used to instantiate the image processor under test.
    def _SCREAMING_SNAKE_CASE ( self : Tuple):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    # Downloads a fixed reference image (network access required).
    def _SCREAMING_SNAKE_CASE ( self : List[Any]):
        SCREAMING_SNAKE_CASE_: int = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        SCREAMING_SNAKE_CASE_: Any = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
    """Shape/value tests for the 3-channel Pix2Struct image processor across
    PIL, NumPy, and PyTorch inputs."""
    _UpperCAmelCase : List[str] = PixaStructImageProcessor if is_vision_available() else None

    # setUp: builds the tester/config holder.
    def _SCREAMING_SNAKE_CASE ( self : Any):
        SCREAMING_SNAKE_CASE_: Tuple = PixaStructImageProcessingTester(self)

    @property
    def _SCREAMING_SNAKE_CASE ( self : Tuple):
        return self.image_processor_tester.prepare_image_processor_dict()

    # The processor exposes its configuration attributes.
    def _SCREAMING_SNAKE_CASE ( self : Dict):
        SCREAMING_SNAKE_CASE_: Optional[Any] = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize"))
        self.assertTrue(hasattr(lowerCAmelCase__ , "do_convert_rgb"))

    # Regression check: mean of flattened patches on the reference image.
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
        SCREAMING_SNAKE_CASE_: List[Any] = self.image_processor_tester.prepare_dummy_image()
        SCREAMING_SNAKE_CASE_: str = self.image_processing_class(**self.image_processor_dict)
        SCREAMING_SNAKE_CASE_: Optional[int] = 2048
        SCREAMING_SNAKE_CASE_: Dict = image_processor(lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606) , atol=1E-3 , rtol=1E-3))

    # PIL inputs: output shape is (batch, max_patches, patch_h*patch_w*C + 2)
    # — the +2 holds the per-patch row/column coordinates.
    def _SCREAMING_SNAKE_CASE ( self : List[Any]):
        # Initialize image_processor
        SCREAMING_SNAKE_CASE_: Tuple = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        SCREAMING_SNAKE_CASE_: Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__)
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , Image.Image)

        # Test not batched input
        SCREAMING_SNAKE_CASE_: Tuple = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            SCREAMING_SNAKE_CASE_: str = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )

            # Test batched
            SCREAMING_SNAKE_CASE_: Union[str, Any] = image_processor(
                lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )

    # VQA mode: calling without header_text must raise; with it, shapes match.
    def _SCREAMING_SNAKE_CASE ( self : Any):
        # Initialize image_processor
        SCREAMING_SNAKE_CASE_: Dict = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        SCREAMING_SNAKE_CASE_: Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__)
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , Image.Image)

        # Test not batched input
        SCREAMING_SNAKE_CASE_: Any = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        SCREAMING_SNAKE_CASE_: Any = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(lowerCAmelCase__):
                SCREAMING_SNAKE_CASE_: Any = image_processor(
                    image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__).flattened_patches

            SCREAMING_SNAKE_CASE_: Tuple = "Hello"
            SCREAMING_SNAKE_CASE_: List[Any] = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )

            # Test batched
            SCREAMING_SNAKE_CASE_: Tuple = image_processor(
                lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )

    # Same shape checks with NumPy array inputs.
    def _SCREAMING_SNAKE_CASE ( self : Dict):
        # Initialize image_processor
        SCREAMING_SNAKE_CASE_: str = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        SCREAMING_SNAKE_CASE_: Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__)
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , np.ndarray)

        SCREAMING_SNAKE_CASE_: int = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            SCREAMING_SNAKE_CASE_: Union[str, Any] = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )

            # Test batched
            SCREAMING_SNAKE_CASE_: int = image_processor(
                lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )

    # Same shape checks with PyTorch tensor inputs.
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
        # Initialize image_processor
        SCREAMING_SNAKE_CASE_: Optional[Any] = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE_: Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__)
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , torch.Tensor)

        # Test not batched input
        SCREAMING_SNAKE_CASE_: int = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            SCREAMING_SNAKE_CASE_: int = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )

            # Test batched
            SCREAMING_SNAKE_CASE_: Tuple = image_processor(
                lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
    """Variant of the suite above for 4-channel (e.g. RGBA) inputs; the
    expected hidden dim uses num_channels - 1 because the processor converts
    to RGB (see `do_convert_rgb` in the kwargs)."""
    _UpperCAmelCase : Optional[Any] = PixaStructImageProcessor if is_vision_available() else None

    # setUp: 4-channel tester; expected_encoded_image_num_channels = 3.
    def _SCREAMING_SNAKE_CASE ( self : List[Any]):
        SCREAMING_SNAKE_CASE_: List[Any] = PixaStructImageProcessingTester(self , num_channels=4)
        SCREAMING_SNAKE_CASE_: Union[str, Any] = 3

    @property
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
        return self.image_processor_tester.prepare_image_processor_dict()

    # The processor exposes its configuration attributes.
    def _SCREAMING_SNAKE_CASE ( self : str):
        SCREAMING_SNAKE_CASE_: Any = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize"))
        self.assertTrue(hasattr(lowerCAmelCase__ , "do_convert_rgb"))

    # PIL inputs: hidden dim computed with (num_channels - 1) after RGB
    # conversion drops the alpha channel.
    def _SCREAMING_SNAKE_CASE ( self : List[str]):
        # Initialize image_processor
        SCREAMING_SNAKE_CASE_: Optional[int] = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        SCREAMING_SNAKE_CASE_: Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__)
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , Image.Image)

        # Test not batched input
        SCREAMING_SNAKE_CASE_: Dict = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            SCREAMING_SNAKE_CASE_: Any = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )

            # Test batched
            SCREAMING_SNAKE_CASE_: Any = image_processor(
                lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
671
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase : Optional[int] = { """configuration_longformer""": [ """LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongformerConfig""", """LongformerOnnxConfig""", ], """tokenization_longformer""": ["""LongformerTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[str] = ["""LongformerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Union[str, Any] = [ """LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongformerForMaskedLM""", """LongformerForMultipleChoice""", """LongformerForQuestionAnswering""", """LongformerForSequenceClassification""", """LongformerForTokenClassification""", """LongformerModel""", """LongformerPreTrainedModel""", """LongformerSelfAttention""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : int = [ """TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFLongformerForMaskedLM""", """TFLongformerForMultipleChoice""", """TFLongformerForQuestionAnswering""", """TFLongformerForSequenceClassification""", """TFLongformerForTokenClassification""", """TFLongformerModel""", """TFLongformerPreTrainedModel""", """TFLongformerSelfAttention""", ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
671
1
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml lowerCAmelCase : Any = NewType("""DataClass""", Any) lowerCAmelCase : Union[str, Any] = NewType("""DataClassType""", Any) def A_ ( _UpperCAmelCase ): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." ) def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[str] = {str(_UpperCAmelCase ): choice for choice in choices} return lambda _UpperCAmelCase : str_to_choice.get(_UpperCAmelCase , _UpperCAmelCase ) def A_ ( *, _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = dataclasses.MISSING , _UpperCAmelCase = dataclasses.MISSING , _UpperCAmelCase = None , **_UpperCAmelCase , ): if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls SCREAMING_SNAKE_CASE_: Any = {} if aliases is not None: SCREAMING_SNAKE_CASE_: Dict = aliases if help is not None: SCREAMING_SNAKE_CASE_: int = help return dataclasses.field(metadata=_UpperCAmelCase , default=_UpperCAmelCase , default_factory=_UpperCAmelCase , **_UpperCAmelCase ) class __lowercase ( UpperCAmelCase_ ): """simple docstring""" _UpperCAmelCase : Iterable[DataClassType] def __init__( self : str , lowerCAmelCase__ : Union[DataClassType, Iterable[DataClassType]] , **lowerCAmelCase__ : Optional[Any]): # To make the default appear when using --help if "formatter_class" not in kwargs: SCREAMING_SNAKE_CASE_: Dict = 
ArgumentDefaultsHelpFormatter super().__init__(**lowerCAmelCase__) if dataclasses.is_dataclass(lowerCAmelCase__): SCREAMING_SNAKE_CASE_: str = [dataclass_types] SCREAMING_SNAKE_CASE_: Optional[Any] = list(lowerCAmelCase__) for dtype in self.dataclass_types: self._add_dataclass_arguments(lowerCAmelCase__) @staticmethod def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : ArgumentParser , lowerCAmelCase__ : dataclasses.Field): SCREAMING_SNAKE_CASE_: int = F"--{field.name}" SCREAMING_SNAKE_CASE_: Optional[int] = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type , lowerCAmelCase__): raise RuntimeError( "Unresolved type detected, which should have been done with the help of " "`typing.get_type_hints` method by default") SCREAMING_SNAKE_CASE_: str = kwargs.pop("aliases" , []) if isinstance(lowerCAmelCase__ , lowerCAmelCase__): SCREAMING_SNAKE_CASE_: int = [aliases] SCREAMING_SNAKE_CASE_: Union[str, Any] = getattr(field.type , "__origin__" , field.type) if origin_type is Union or (hasattr(lowerCAmelCase__ , "UnionType") and isinstance(lowerCAmelCase__ , types.UnionType)): if str not in field.type.__args__ and ( len(field.type.__args__) != 2 or type(lowerCAmelCase__) not in field.type.__args__ ): raise ValueError( "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because" " the argument parser only supports one type per argument." 
F" Problem encountered in field '{field.name}'.") if type(lowerCAmelCase__) not in field.type.__args__: # filter `str` in Union SCREAMING_SNAKE_CASE_: int = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] SCREAMING_SNAKE_CASE_: int = getattr(field.type , "__origin__" , field.type) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) SCREAMING_SNAKE_CASE_: List[str] = ( field.type.__args__[0] if isinstance(lowerCAmelCase__ , field.type.__args__[1]) else field.type.__args__[1] ) SCREAMING_SNAKE_CASE_: Any = getattr(field.type , "__origin__" , field.type) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) SCREAMING_SNAKE_CASE_: Any = {} if origin_type is Literal or (isinstance(field.type , lowerCAmelCase__) and issubclass(field.type , lowerCAmelCase__)): if origin_type is Literal: SCREAMING_SNAKE_CASE_: int = field.type.__args__ else: SCREAMING_SNAKE_CASE_: Dict = [x.value for x in field.type] SCREAMING_SNAKE_CASE_: List[Any] = make_choice_type_function(kwargs["choices"]) if field.default is not dataclasses.MISSING: SCREAMING_SNAKE_CASE_: Any = field.default else: SCREAMING_SNAKE_CASE_: Dict = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument SCREAMING_SNAKE_CASE_: Any = copy(lowerCAmelCase__) # Hack because type=bool in argparse does not behave as we want. SCREAMING_SNAKE_CASE_: List[str] = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. 
SCREAMING_SNAKE_CASE_: int = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way SCREAMING_SNAKE_CASE_: Union[str, Any] = default # This tells argparse we accept 0 or 1 value after --field_name SCREAMING_SNAKE_CASE_: List[str] = "?" # This is the value that will get picked if we do --field_name (without value) SCREAMING_SNAKE_CASE_: Tuple = True elif isclass(lowerCAmelCase__) and issubclass(lowerCAmelCase__ , lowerCAmelCase__): SCREAMING_SNAKE_CASE_: Optional[Any] = field.type.__args__[0] SCREAMING_SNAKE_CASE_: Optional[int] = "+" if field.default_factory is not dataclasses.MISSING: SCREAMING_SNAKE_CASE_: Optional[int] = field.default_factory() elif field.default is dataclasses.MISSING: SCREAMING_SNAKE_CASE_: int = True else: SCREAMING_SNAKE_CASE_: Optional[int] = field.type if field.default is not dataclasses.MISSING: SCREAMING_SNAKE_CASE_: int = field.default elif field.default_factory is not dataclasses.MISSING: SCREAMING_SNAKE_CASE_: str = field.default_factory() else: SCREAMING_SNAKE_CASE_: List[Any] = True parser.add_argument(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): SCREAMING_SNAKE_CASE_: Tuple = False parser.add_argument(F"--no_{field.name}" , action="store_false" , dest=field.name , **lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : DataClassType): if hasattr(lowerCAmelCase__ , "_argument_group_name"): SCREAMING_SNAKE_CASE_: Union[str, Any] = self.add_argument_group(dtype._argument_group_name) else: SCREAMING_SNAKE_CASE_: Any = self try: SCREAMING_SNAKE_CASE_: Dict[str, type] = get_type_hints(lowerCAmelCase__) except NameError: raise RuntimeError( F"Type resolution failed for {dtype}. Try declaring the class in global scope or " "removing line of `from __future__ import annotations` which opts in Postponed " "Evaluation of Annotations (PEP 563)") except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(lowerCAmelCase__): SCREAMING_SNAKE_CASE_: List[Any] = ".".join(map(lowerCAmelCase__ , sys.version_info[:3])) raise RuntimeError( F"Type resolution failed for {dtype} on Python {python_version}. Try removing " "line of `from __future__ import annotations` which opts in union types as " "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). 
To " "support Python versions that lower than 3.10, you need to use " "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of " "`X | None`.") from ex raise for field in dataclasses.fields(lowerCAmelCase__): if not field.init: continue SCREAMING_SNAKE_CASE_: Optional[int] = type_hints[field.name] self._parse_dataclass_field(lowerCAmelCase__ , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Tuple=None , ): if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)): SCREAMING_SNAKE_CASE_: List[Any] = [] if args_filename: args_files.append(Path(lowerCAmelCase__)) elif look_for_args_file and len(sys.argv): args_files.append(Path(sys.argv[0]).with_suffix(".args")) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values SCREAMING_SNAKE_CASE_: Dict = ArgumentParser() args_file_parser.add_argument(lowerCAmelCase__ , type=lowerCAmelCase__ , action="append") # Use only remaining args for further parsing (remove the args_file_flag) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = args_file_parser.parse_known_args(args=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = vars(lowerCAmelCase__).get(args_file_flag.lstrip("-") , lowerCAmelCase__) if cmd_args_file_paths: args_files.extend([Path(lowerCAmelCase__) for p in cmd_args_file_paths]) SCREAMING_SNAKE_CASE_: str = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last SCREAMING_SNAKE_CASE_: Dict = file_args + args if args is not None else file_args + sys.argv[1:] SCREAMING_SNAKE_CASE_ 
, SCREAMING_SNAKE_CASE_: Optional[int] = self.parse_known_args(args=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = [] for dtype in self.dataclass_types: SCREAMING_SNAKE_CASE_: Union[str, Any] = {f.name for f in dataclasses.fields(lowerCAmelCase__) if f.init} SCREAMING_SNAKE_CASE_: Optional[int] = {k: v for k, v in vars(lowerCAmelCase__).items() if k in keys} for k in keys: delattr(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = dtype(**lowerCAmelCase__) outputs.append(lowerCAmelCase__) if len(namespace.__dict__) > 0: # additional namespace. outputs.append(lowerCAmelCase__) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(F"Some specified arguments are not used by the HfArgumentParser: {remaining_args}") return (*outputs,) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Dict[str, Any] , lowerCAmelCase__ : bool = False): SCREAMING_SNAKE_CASE_: Any = set(args.keys()) SCREAMING_SNAKE_CASE_: Optional[Any] = [] for dtype in self.dataclass_types: SCREAMING_SNAKE_CASE_: Union[str, Any] = {f.name for f in dataclasses.fields(lowerCAmelCase__) if f.init} SCREAMING_SNAKE_CASE_: Optional[int] = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys()) SCREAMING_SNAKE_CASE_: List[str] = dtype(**lowerCAmelCase__) outputs.append(lowerCAmelCase__) if not allow_extra_keys and unused_keys: raise ValueError(F"Some keys are not used by the HfArgumentParser: {sorted(lowerCAmelCase__)}") return tuple(lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : bool = False): with open(Path(lowerCAmelCase__) , encoding="utf-8") as open_json_file: SCREAMING_SNAKE_CASE_: int = json.loads(open_json_file.read()) SCREAMING_SNAKE_CASE_: List[str] = self.parse_dict(lowerCAmelCase__ , allow_extra_keys=lowerCAmelCase__) return tuple(lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : str , 
lowerCAmelCase__ : bool = False): SCREAMING_SNAKE_CASE_: str = self.parse_dict(yaml.safe_load(Path(lowerCAmelCase__).read_text()) , allow_extra_keys=lowerCAmelCase__) return tuple(lowerCAmelCase__)
671
import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# lowerCAmelCase : Optional[int] = [ # (stable-diffusion, HF Diffusers) ("""time_embed.0.weight""", """time_embedding.linear_1.weight"""), ("""time_embed.0.bias""", """time_embedding.linear_1.bias"""), ("""time_embed.2.weight""", """time_embedding.linear_2.weight"""), ("""time_embed.2.bias""", """time_embedding.linear_2.bias"""), ("""input_blocks.0.0.weight""", """conv_in.weight"""), ("""input_blocks.0.0.bias""", """conv_in.bias"""), ("""out.0.weight""", """conv_norm_out.weight"""), ("""out.0.bias""", """conv_norm_out.bias"""), ("""out.2.weight""", """conv_out.weight"""), ("""out.2.bias""", """conv_out.bias"""), ] lowerCAmelCase : str = [ # (stable-diffusion, HF Diffusers) ("""in_layers.0""", """norm1"""), ("""in_layers.2""", """conv1"""), ("""out_layers.0""", """norm2"""), ("""out_layers.3""", """conv2"""), ("""emb_layers.1""", """time_emb_proj"""), ("""skip_connection""", """conv_shortcut"""), ] lowerCAmelCase : List[str] = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. 
for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks lowerCAmelCase : int = f'''down_blocks.{i}.resnets.{j}.''' lowerCAmelCase : List[str] = f'''input_blocks.{3*i + j + 1}.0.''' unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 lowerCAmelCase : Any = f'''down_blocks.{i}.attentions.{j}.''' lowerCAmelCase : List[Any] = f'''input_blocks.{3*i + j + 1}.1.''' unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks lowerCAmelCase : Any = f'''up_blocks.{i}.resnets.{j}.''' lowerCAmelCase : str = f'''output_blocks.{3*i + j}.0.''' unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 lowerCAmelCase : List[Any] = f'''up_blocks.{i}.attentions.{j}.''' lowerCAmelCase : str = f'''output_blocks.{3*i + j}.1.''' unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 lowerCAmelCase : Any = f'''down_blocks.{i}.downsamplers.0.conv.''' lowerCAmelCase : Tuple = f'''input_blocks.{3*(i+1)}.0.op.''' unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 lowerCAmelCase : Tuple = f'''up_blocks.{i}.upsamplers.0.''' lowerCAmelCase : Tuple = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.''' unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) lowerCAmelCase : Any = """mid_block.attentions.0.""" lowerCAmelCase : Dict = """middle_block.1.""" unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): lowerCAmelCase : int = f'''mid_block.resnets.{j}.''' lowerCAmelCase : Union[str, Any] = f'''middle_block.{2*j}.''' unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def A_ ( _UpperCAmelCase ): # buyer beware: this is a *brittle* 
function, # and correct output requires that all of these pieces interact in # the exact order in which I have arranged them. SCREAMING_SNAKE_CASE_: Dict = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: SCREAMING_SNAKE_CASE_: Optional[int] = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: SCREAMING_SNAKE_CASE_: Any = v.replace(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: str = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: SCREAMING_SNAKE_CASE_: Optional[Any] = v.replace(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Optional[int] = v SCREAMING_SNAKE_CASE_: Optional[Any] = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# lowerCAmelCase : Union[str, Any] = [ # (stable-diffusion, HF Diffusers) ("""nin_shortcut""", """conv_shortcut"""), ("""norm_out""", """conv_norm_out"""), ("""mid.attn_1.""", """mid_block.attentions.0."""), ] for i in range(4): # down_blocks have two resnets for j in range(2): lowerCAmelCase : Union[str, Any] = f'''encoder.down_blocks.{i}.resnets.{j}.''' lowerCAmelCase : Optional[Any] = f'''encoder.down.{i}.block.{j}.''' vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: lowerCAmelCase : Dict = f'''down_blocks.{i}.downsamplers.0.''' lowerCAmelCase : List[str] = f'''down.{i}.downsample.''' vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) lowerCAmelCase : List[str] = f'''up_blocks.{i}.upsamplers.0.''' lowerCAmelCase : int = f'''up.{3-i}.upsample.''' vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): lowerCAmelCase : Any = f'''decoder.up_blocks.{i}.resnets.{j}.''' lowerCAmelCase : int = f'''decoder.up.{3-i}.block.{j}.''' 
vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): lowerCAmelCase : str = f'''mid_block.resnets.{i}.''' lowerCAmelCase : Tuple = f'''mid.block_{i+1}.''' vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) lowerCAmelCase : List[Any] = [ # (stable-diffusion, HF Diffusers) ("""norm.""", """group_norm."""), ("""q.""", """query."""), ("""k.""", """key."""), ("""v.""", """value."""), ("""proj_out.""", """proj_attn."""), ] def A_ ( _UpperCAmelCase ): # convert HF linear weights to SD conv2d weights return w.reshape(*w.shape , 1 , 1 ) def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[Any] = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: SCREAMING_SNAKE_CASE_: Union[str, Any] = v.replace(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Union[str, Any] = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: SCREAMING_SNAKE_CASE_: Any = v.replace(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] = v SCREAMING_SNAKE_CASE_: Tuple = {v: vae_state_dict[k] for k, v in mapping.items()} SCREAMING_SNAKE_CASE_: Union[str, Any] = ["q", "k", "v", "proj_out"] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if f"mid.attn_1.{weight_name}.weight" in k: print(f"Reshaping {k} for SD format" ) SCREAMING_SNAKE_CASE_: List[str] = reshape_weight_for_sd(_UpperCAmelCase ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# lowerCAmelCase : Optional[Any] = [ # (stable-diffusion, HF Diffusers) ("""resblocks.""", """text_model.encoder.layers."""), ("""ln_1""", """layer_norm1"""), ("""ln_2""", """layer_norm2"""), (""".c_fc.""", """.fc1."""), (""".c_proj.""", """.fc2."""), (""".attn""", """.self_attn"""), ("""ln_final.""", 
"""transformer.text_model.final_layer_norm."""), ("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""), ("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""), ] lowerCAmelCase : Optional[Any] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} lowerCAmelCase : Optional[int] = re.compile("""|""".join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp lowerCAmelCase : str = {"""q""": 0, """k""": 1, """v""": 2} def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = {} SCREAMING_SNAKE_CASE_: str = {} SCREAMING_SNAKE_CASE_: List[str] = {} for k, v in text_enc_dict.items(): if ( k.endswith(".self_attn.q_proj.weight" ) or k.endswith(".self_attn.k_proj.weight" ) or k.endswith(".self_attn.v_proj.weight" ) ): SCREAMING_SNAKE_CASE_: str = k[: -len(".q_proj.weight" )] SCREAMING_SNAKE_CASE_: Dict = k[-len("q_proj.weight" )] if k_pre not in capture_qkv_weight: SCREAMING_SNAKE_CASE_: Tuple = [None, None, None] SCREAMING_SNAKE_CASE_: Union[str, Any] = v continue if ( k.endswith(".self_attn.q_proj.bias" ) or k.endswith(".self_attn.k_proj.bias" ) or k.endswith(".self_attn.v_proj.bias" ) ): SCREAMING_SNAKE_CASE_: Union[str, Any] = k[: -len(".q_proj.bias" )] SCREAMING_SNAKE_CASE_: Any = k[-len("q_proj.bias" )] if k_pre not in capture_qkv_bias: SCREAMING_SNAKE_CASE_: List[Any] = [None, None, None] SCREAMING_SNAKE_CASE_: List[str] = v continue SCREAMING_SNAKE_CASE_: int = textenc_pattern.sub(lambda _UpperCAmelCase : protected[re.escape(m.group(0 ) )] , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Dict = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) SCREAMING_SNAKE_CASE_: str = textenc_pattern.sub(lambda _UpperCAmelCase : protected[re.escape(m.group(0 ) )] , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: int = 
torch.cat(_UpperCAmelCase ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) SCREAMING_SNAKE_CASE_: Optional[int] = textenc_pattern.sub(lambda _UpperCAmelCase : protected[re.escape(m.group(0 ) )] , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[Any] = torch.cat(_UpperCAmelCase ) return new_state_dict def A_ ( _UpperCAmelCase ): return text_enc_dict if __name__ == "__main__": lowerCAmelCase : int = argparse.ArgumentParser() parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""") parser.add_argument( """--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt.""" ) lowerCAmelCase : Optional[Any] = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" 
# Path for safetensors lowerCAmelCase : int = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""") lowerCAmelCase : List[str] = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""") lowerCAmelCase : Optional[int] = osp.join(args.model_path, """text_encoder""", """model.safetensors""") # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): lowerCAmelCase : Optional[int] = load_file(unet_path, device="""cpu""") else: lowerCAmelCase : Union[str, Any] = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""") lowerCAmelCase : Optional[Any] = torch.load(unet_path, map_location="""cpu""") if osp.exists(vae_path): lowerCAmelCase : str = load_file(vae_path, device="""cpu""") else: lowerCAmelCase : List[Any] = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""") lowerCAmelCase : Optional[Any] = torch.load(vae_path, map_location="""cpu""") if osp.exists(text_enc_path): lowerCAmelCase : List[Any] = load_file(text_enc_path, device="""cpu""") else: lowerCAmelCase : List[Any] = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""") lowerCAmelCase : Optional[Any] = torch.load(text_enc_path, map_location="""cpu""") # Convert the UNet model lowerCAmelCase : int = convert_unet_state_dict(unet_state_dict) lowerCAmelCase : Optional[int] = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()} # Convert the VAE model lowerCAmelCase : Union[str, Any] = convert_vae_state_dict(vae_state_dict) lowerCAmelCase : Optional[int] = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper lowerCAmelCase : Any = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm lowerCAmelCase : Any = {"""transformer.""" + 
k: v for k, v in text_enc_dict.items()} lowerCAmelCase : str = convert_text_enc_state_dict_vaa(text_enc_dict) lowerCAmelCase : Dict = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()} else: lowerCAmelCase : Any = convert_text_enc_state_dict(text_enc_dict) lowerCAmelCase : Optional[Any] = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint lowerCAmelCase : Union[str, Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: lowerCAmelCase : str = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: lowerCAmelCase : int = {"""state_dict""": state_dict} torch.save(state_dict, args.checkpoint_path)
671
1
import json
import os
import re
import unittest

from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
    """Test suite for the CodeGen tokenizer (slow and Rust-backed fast variants).

    NOTE(review): this file was machine-renamed. Every method is called
    `_SCREAMING_SNAKE_CASE` (later definitions shadow earlier ones), many bodies
    read names (`tokenizer`, `tokens`, `kwargs`, `out_s`, ...) that the renamed
    assignments no longer bind, and one signature repeats a parameter name,
    which is a SyntaxError. The comments below record the evident intent of
    each test; verify against the upstream CodeGen tokenizer tests.
    """

    # Intended mixin configuration: tokenizer_class, rust_tokenizer_class,
    # test_rust_tokenizer, from_pretrained_kwargs, test_seq2seq (presumably —
    # the duplicated attribute name keeps only the last binding at runtime).
    _UpperCAmelCase : Optional[Any] = CodeGenTokenizer
    _UpperCAmelCase : int = CodeGenTokenizerFast
    _UpperCAmelCase : List[Any] = True
    _UpperCAmelCase : Optional[int] = {'''add_prefix_space''': True}
    _UpperCAmelCase : Union[str, Any] = False

    def _SCREAMING_SNAKE_CASE ( self : str):
        # setUp: write a tiny BPE vocab/merges fixture into the temp directory.
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        SCREAMING_SNAKE_CASE_: Optional[int] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        # token -> id mapping derived from the list order.
        SCREAMING_SNAKE_CASE_: List[str] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__))))
        SCREAMING_SNAKE_CASE_: List[str] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        SCREAMING_SNAKE_CASE_: str = {"unk_token": "<unk>"}

        # Fixture file paths inside the tester's temporary directory.
        SCREAMING_SNAKE_CASE_: Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
        SCREAMING_SNAKE_CASE_: Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as fp:
            fp.write(json.dumps(lowerCAmelCase__) + "\n")
        with open(self.merges_file , "w" , encoding="utf-8") as fp:
            fp.write("\n".join(lowerCAmelCase__))

    def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowerCAmelCase__ : str):
        # Intended: build the slow tokenizer from the fixtures
        # (NOTE(review): `kwargs` is unbound here — the **-parameter was renamed).
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **lowerCAmelCase__ : Any):
        # Intended: build the fast (Rust-backed) tokenizer from the same fixtures.
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : str):
        # Intended: get_input_output_texts — round-trip sample for the mixin.
        SCREAMING_SNAKE_CASE_: Dict = "lower newer"
        SCREAMING_SNAKE_CASE_: Union[str, Any] = "lower newer"
        return input_text, output_text

    def _SCREAMING_SNAKE_CASE ( self : int):
        # Intended: full-tokenizer smoke test against the toy BPE vocab.
        SCREAMING_SNAKE_CASE_: str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
        SCREAMING_SNAKE_CASE_: str = "lower newer"
        # Expected BPE pieces for "lower newer" with a prefix space.
        SCREAMING_SNAKE_CASE_: Dict = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        SCREAMING_SNAKE_CASE_: Dict = tokenizer.tokenize(lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)

        SCREAMING_SNAKE_CASE_: List[Any] = tokens + [tokenizer.unk_token]
        SCREAMING_SNAKE_CASE_: Dict = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE ( self : Tuple):
        # Intended: slow and fast tokenizers must agree on tokens and ids.
        if not self.test_rust_tokenizer:
            return

        SCREAMING_SNAKE_CASE_: Dict = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_: Any = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Optional[int] = "lower newer"

        # Testing tokenization
        SCREAMING_SNAKE_CASE_: str = tokenizer.tokenize(lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[Any] = rust_tokenizer.tokenize(lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)

        # Testing conversion to ids without special tokens
        SCREAMING_SNAKE_CASE_: List[str] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Tuple = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)

        # Testing conversion to ids with special tokens
        SCREAMING_SNAKE_CASE_: Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.encode(lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: str = rust_tokenizer.encode(lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)

        # Testing the unknown token
        SCREAMING_SNAKE_CASE_: int = tokens + [rust_tokenizer.unk_token]
        SCREAMING_SNAKE_CASE_: str = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE ( self : Tuple , *lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : Tuple):
        # NOTE(review): `*` and `**` parameters share one name here — that is a
        # SyntaxError in Python; the originals were presumably distinct names.
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any]=15):
        # Intended: padding="max_length" without a pad token must raise for
        # encode / encode_plus / batch_encode_plus, on both simple and pair input.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
                SCREAMING_SNAKE_CASE_: Dict = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__)

                # Simple input
                SCREAMING_SNAKE_CASE_: Any = "This is a simple input"
                SCREAMING_SNAKE_CASE_: Dict = ["This is a simple input 1", "This is a simple input 2"]
                SCREAMING_SNAKE_CASE_: List[str] = ("This is a simple input", "This is a pair")
                SCREAMING_SNAKE_CASE_: str = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="max_length")

                # Simple input
                self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="max_length")

                # Simple input
                self.assertRaises(
                    lowerCAmelCase__ , tokenizer_r.batch_encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="max_length" , )

                # Pair input
                self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="max_length")

                # Pair input
                self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="max_length")

                # Pair input
                self.assertRaises(
                    lowerCAmelCase__ , tokenizer_r.batch_encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="max_length" , )

    def _SCREAMING_SNAKE_CASE ( self : int):
        # Intended: padding behaviour when a pad token IS configured on the slow
        # tokenizer — checks max_length padding, longest-in-batch padding, and
        # where pad ids / zero attention-mask entries appear.
        # NOTE(review): `truncate=` is not a recognized tokenizer kwarg — the
        # canonical name is `truncation=`; confirm against the upstream test.
        SCREAMING_SNAKE_CASE_: Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>")

        # Simple input
        SCREAMING_SNAKE_CASE_: str = "This is a simple input"
        SCREAMING_SNAKE_CASE_: List[str] = ["This is a simple input looooooooong", "This is a simple input"]
        SCREAMING_SNAKE_CASE_: str = ("This is a simple input", "This is a pair")
        SCREAMING_SNAKE_CASE_: List[Any] = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        SCREAMING_SNAKE_CASE_: Any = tokenizer.pad_token_id
        SCREAMING_SNAKE_CASE_: List[Any] = tokenizer(lowerCAmelCase__ , padding="max_length" , max_length=30 , return_tensors="np")
        SCREAMING_SNAKE_CASE_: List[Any] = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , truncate=lowerCAmelCase__ , return_tensors="np")
        SCREAMING_SNAKE_CASE_: List[str] = tokenizer(*lowerCAmelCase__ , padding="max_length" , max_length=60 , return_tensors="np")
        SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , truncate=lowerCAmelCase__ , return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1] , 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_sa["input_ids"].shape[-1] , 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa["input_ids"][0])
        self.assertFalse(0 in out_sa["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa["input_ids"][1])
        self.assertTrue(0 in out_sa["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1] , 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_pa["input_ids"].shape[-1] , 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa["input_ids"][0])
        self.assertFalse(0 in out_pa["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa["input_ids"][1])
        self.assertTrue(0 in out_pa["attention_mask"][1])

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
        # Intended: a custom bos_token with add_bos_token=True must be prepended
        # to every encoding and survive decode.
        SCREAMING_SNAKE_CASE_: Dict = "$$$"
        SCREAMING_SNAKE_CASE_: Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCAmelCase__ , add_bos_token=lowerCAmelCase__)

        SCREAMING_SNAKE_CASE_: List[Any] = "This is a simple input"
        SCREAMING_SNAKE_CASE_: Any = ["This is a simple input 1", "This is a simple input 2"]

        SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.bos_token_id
        SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[str] = tokenizer(lowerCAmelCase__)

        self.assertEqual(out_s.input_ids[0] , lowerCAmelCase__)
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))

        SCREAMING_SNAKE_CASE_: List[str] = tokenizer.decode(out_s.input_ids)
        SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.batch_decode(out_sa.input_ids)

        self.assertEqual(decode_s.split()[0] , lowerCAmelCase__)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))

    @slow
    def _SCREAMING_SNAKE_CASE ( self : List[Any]):
        # Intended: decode(..., truncate_before_pattern=...) cuts the generated
        # code at the first comment / EOS / docstring / blank-run pattern.
        SCREAMING_SNAKE_CASE_: List[str] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        SCREAMING_SNAKE_CASE_: Dict = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        SCREAMING_SNAKE_CASE_: str = "\nif len_a > len_b: result = a\nelse: result = b"
        SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.encode(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Optional[Any] = ["^#", re.escape("<|endoftext|>"), "^'''", "^\"\"\"", "\n\n\n"]
        SCREAMING_SNAKE_CASE_: Any = tokenizer.decode(lowerCAmelCase__ , truncate_before_pattern=lowerCAmelCase__)

        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE ( self : str):
        # Intentionally skipped by the original test suite.
        pass
671
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): both module-level constants below were machine-renamed to the
# same identifier, so only the second binding survives at runtime; kept as-is
# to avoid changing the module's external surface.
lowerCAmelCase : int = logging.get_logger(__name__)

lowerCAmelCase : Dict = {
    """microsoft/xprophetnet-large-wiki100-cased""": (
        """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
    ),
}


class __lowercase ( UpperCAmelCase_ ):
    """Configuration class for an XLM-ProphetNet encoder-decoder model.

    Stores the hyper-parameters (layer counts, hidden sizes, dropouts, n-gram
    prediction settings, ...) used to instantiate the model.

    BUGFIX: the original ``__init__`` repeated the parameter name
    ``lowerCAmelCase__`` for every argument (a SyntaxError), while the body
    read the real names (``vocab_size``, ``hidden_size``, ...) and assigned
    them to a throwaway local instead of ``self``; likewise the setter was
    declared via ``@num_hidden_layers.setter`` on a property bound under a
    different name (a NameError at class creation). The parameter and
    attribute names restored here are exactly the ones the original body and
    decorator already referenced.
    """

    # Config identity / serialization hints consumed by PretrainedConfig.
    # (The originals were three duplicate `_UpperCAmelCase` bindings, of which
    # only the last could survive; the intended names are restored from the
    # values themselves.)
    model_type = '''xlm-prophetnet'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''num_encoder_attention_heads''',
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 3_0522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        # Total depth is the sum of encoder and decoder stacks.
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        # Depth must be set per-stack; a single total is ambiguous.
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`.")
671
1
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax
import jax.numpy as jnp

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
    CommonSchedulerState,
    FlaxKarrasDiffusionSchedulers,
    FlaxSchedulerMixin,
    FlaxSchedulerOutput,
    add_noise_common,
    get_velocity_common,
)


@flax.struct.dataclass
class __lowercase :
    """Mutable state of the Flax DDPM scheduler (the scheduler object itself is stateless).

    NOTE(review): the fields below were all machine-renamed to `_UpperCAmelCase`
    — duplicate annotations collapse to a single field at runtime, and the
    `create` classmethod passes keywords (`common`, `init_noise_sigma`,
    `timesteps`) that no longer exist as fields. Intended fields, per the
    create() call: common, init_noise_sigma, timesteps, num_inference_steps.
    """
    _UpperCAmelCase : CommonSchedulerState

    # setable values
    _UpperCAmelCase : jnp.ndarray
    _UpperCAmelCase : jnp.ndarray
    _UpperCAmelCase : Optional[int] = None

    @classmethod
    def _SCREAMING_SNAKE_CASE ( cls : Dict , lowerCAmelCase__ : CommonSchedulerState , lowerCAmelCase__ : jnp.ndarray , lowerCAmelCase__ : jnp.ndarray):
        # Factory: bundle the shared state, initial noise sigma and timestep array.
        return cls(common=lowerCAmelCase__ , init_noise_sigma=lowerCAmelCase__ , timesteps=lowerCAmelCase__)


@dataclass
class __lowercase ( UpperCAmelCase_ ):
    """Output wrapper returned by `step` — carries the updated scheduler state."""
    _UpperCAmelCase : DDPMSchedulerState


class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ ):
    """Flax implementation of the DDPM (Ho et al. 2020) denoising scheduler.

    NOTE(review): machine-renaming broke many internal references in this
    class: most methods share the name `_SCREAMING_SNAKE_CASE`, assignments
    target a local `SCREAMING_SNAKE_CASE_` while subsequent lines read the
    original names (`t`, `variance`, `sample`, ...), and `step` calls
    `self._get_variance`, which no method currently binds. Comments below
    describe the intended upstream behaviour; verify before reuse.
    """
    _UpperCAmelCase : Optional[int] = [e.name for e in FlaxKarrasDiffusionSchedulers]
    _UpperCAmelCase : jnp.dtype

    @property
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
        # Intended: `has_state` — this scheduler keeps its state externally.
        return True

    @register_to_config
    def __init__( self : List[Any] , lowerCAmelCase__ : int = 1000 , lowerCAmelCase__ : float = 0.0001 , lowerCAmelCase__ : float = 0.02 , lowerCAmelCase__ : str = "linear" , lowerCAmelCase__ : Optional[jnp.ndarray] = None , lowerCAmelCase__ : str = "fixed_small" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : str = "epsilon" , lowerCAmelCase__ : jnp.dtype = jnp.floataa , ):
        # NOTE(review): duplicate parameter names (SyntaxError artifact); the
        # intended parameters are num_train_timesteps, beta_start, beta_end,
        # beta_schedule, trained_betas, variance_type, clip_sample,
        # prediction_type and dtype — @register_to_config records them.
        SCREAMING_SNAKE_CASE_: Optional[Any] = dtype

    def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase__ : Optional[CommonSchedulerState] = None):
        # Intended: `create_state` — build a fresh DDPMSchedulerState.
        if common is None:
            SCREAMING_SNAKE_CASE_: Tuple = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        SCREAMING_SNAKE_CASE_: List[Any] = jnp.array(1.0 , dtype=self.dtype)

        # Training timesteps in descending order: T-1, ..., 1, 0.
        SCREAMING_SNAKE_CASE_: Union[str, Any] = jnp.arange(0 , self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=lowerCAmelCase__ , init_noise_sigma=lowerCAmelCase__ , timesteps=lowerCAmelCase__ , )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : DDPMSchedulerState , lowerCAmelCase__ : jnp.ndarray , lowerCAmelCase__ : Optional[int] = None):
        # Intended: `scale_model_input` — DDPM needs no input scaling.
        return sample

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : DDPMSchedulerState , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple = ()):
        # Intended: `set_timesteps` — pick num_inference_steps evenly spaced
        # timesteps out of the training schedule, descending.
        SCREAMING_SNAKE_CASE_: List[str] = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        SCREAMING_SNAKE_CASE_: List[str] = (jnp.arange(0 , lowerCAmelCase__) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=lowerCAmelCase__ , timesteps=lowerCAmelCase__ , )

    def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : DDPMSchedulerState , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Any=None):
        # Intended: `_get_variance` — posterior variance of x_{t-1} given x_t.
        SCREAMING_SNAKE_CASE_: int = state.common.alphas_cumprod[t]
        SCREAMING_SNAKE_CASE_: Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        SCREAMING_SNAKE_CASE_: Union[str, Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            SCREAMING_SNAKE_CASE_: Any = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            # Clamp away from zero so downstream log/sqrt stay finite.
            SCREAMING_SNAKE_CASE_: Optional[Any] = jnp.clip(lowerCAmelCase__ , a_min=1E-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            SCREAMING_SNAKE_CASE_: Optional[Any] = jnp.log(jnp.clip(lowerCAmelCase__ , a_min=1E-20))
        elif variance_type == "fixed_large":
            SCREAMING_SNAKE_CASE_: int = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            SCREAMING_SNAKE_CASE_: List[Any] = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # Interpolate between min (posterior) and max (beta_t) log-variance
            # using the model-predicted fraction in [-1, 1].
            SCREAMING_SNAKE_CASE_: Dict = variance
            SCREAMING_SNAKE_CASE_: List[str] = state.common.betas[t]
            SCREAMING_SNAKE_CASE_: int = (predicted_variance + 1) / 2
            SCREAMING_SNAKE_CASE_: Optional[Any] = frac * max_log + (1 - frac) * min_log

        return variance

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : DDPMSchedulerState , lowerCAmelCase__ : jnp.ndarray , lowerCAmelCase__ : int , lowerCAmelCase__ : jnp.ndarray , lowerCAmelCase__ : Optional[jax.random.KeyArray] = None , lowerCAmelCase__ : bool = True , ):
        # Intended: `step` — one reverse-diffusion update x_t -> x_{t-1}.
        SCREAMING_SNAKE_CASE_: Tuple = timestep

        if key is None:
            # Deterministic fallback PRNG key when the caller supplies none.
            SCREAMING_SNAKE_CASE_: int = jax.random.PRNGKey(0)

        # Models with learned variance emit 2x channels: mean ‖ variance.
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = jnp.split(lowerCAmelCase__ , sample.shape[1] , axis=1)
        else:
            SCREAMING_SNAKE_CASE_: List[str] = None

        # 1. compute alphas, betas
        SCREAMING_SNAKE_CASE_: Dict = state.common.alphas_cumprod[t]
        SCREAMING_SNAKE_CASE_: Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype))
        SCREAMING_SNAKE_CASE_: int = 1 - alpha_prod_t
        SCREAMING_SNAKE_CASE_: Optional[int] = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            SCREAMING_SNAKE_CASE_: Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            SCREAMING_SNAKE_CASE_: Union[str, Any] = model_output
        elif self.config.prediction_type == "v_prediction":
            SCREAMING_SNAKE_CASE_: Optional[int] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler.")

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            SCREAMING_SNAKE_CASE_: Optional[int] = jnp.clip(lowerCAmelCase__ , -1 , 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        SCREAMING_SNAKE_CASE_: Dict = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        SCREAMING_SNAKE_CASE_: List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        SCREAMING_SNAKE_CASE_: str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            # Draw the stochastic part of x_{t-1}; scaled by sqrt(variance).
            SCREAMING_SNAKE_CASE_: int = jax.random.split(lowerCAmelCase__ , num=1)
            SCREAMING_SNAKE_CASE_: int = jax.random.normal(lowerCAmelCase__ , shape=model_output.shape , dtype=self.dtype)
            return (self._get_variance(lowerCAmelCase__ , lowerCAmelCase__ , predicted_variance=lowerCAmelCase__) ** 0.5) * noise

        # No noise is added at the final step (t == 0).
        SCREAMING_SNAKE_CASE_: Any = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype))

        SCREAMING_SNAKE_CASE_: Optional[Any] = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=lowerCAmelCase__ , state=lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : DDPMSchedulerState , lowerCAmelCase__ : jnp.ndarray , lowerCAmelCase__ : jnp.ndarray , lowerCAmelCase__ : jnp.ndarray , ):
        # Intended: `add_noise` — forward-diffuse clean samples to timestep t.
        return add_noise_common(state.common , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : DDPMSchedulerState , lowerCAmelCase__ : jnp.ndarray , lowerCAmelCase__ : jnp.ndarray , lowerCAmelCase__ : jnp.ndarray , ):
        # Intended: `get_velocity` — v-prediction target for training.
        return get_velocity_common(state.common , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)

    def __len__( self : Any):
        # Length of the full training schedule.
        return self.config.num_train_timesteps
671
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_vision_available():
    import PIL

lowerCAmelCase : Dict = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    """Pairwise squared Euclidean distances between rows of `a` and rows of `b`.

    Uses the ||a||^2 - 2ab + ||b||^2 expansion; returns a (len(a), len(b)) matrix.

    BUGFIX: the original signature repeated the parameter name (a SyntaxError)
    while the body already read `a` and `b`, and the function was named `A_`,
    leaving the `squared_euclidean_distance` call below unbound.
    """
    b = b.T
    aa = np.sum(np.square(a), axis=1)
    ba = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d


def color_quantize(x, clusters):
    """Map each RGB pixel of `x` to the index of its nearest palette cluster."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


# Backward-compatible alias: `A_` was the module-level name that previously
# resolved to this (second) function.
A_ = color_quantize


class __lowercase ( UpperCAmelCase_ ):
    """ImageGPT-style image processor.

    Resizes images, normalizes pixel values to [-1, 1], and optionally
    colour-quantizes pixels against a cluster palette, emitting ``input_ids``
    of palette indices.

    BUGFIX: all three methods were machine-renamed to `_SCREAMING_SNAKE_CASE`
    (so the first two were shadowed and `self.resize`/`self.normalize` calls
    in `preprocess` were unbound), every signature repeated its parameter
    name (a SyntaxError), and `__init__` assigned to a throwaway local
    instead of `self`. Names restored from the bodies' own reads; the last
    surviving public binding is preserved via an alias below.
    """

    _UpperCAmelCase : int = ['''pixel_values''']

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        # Palette stored as an array; stays None when quantization is unused.
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize `image` to the exact (height, width) given in `size`.

        Raises ValueError if `size` lacks either key.
        """
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"Size dictionary must contain both height and width keys. Got {size.keys()}")
        # `resize` here is the module-level transform, not this method.
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """Scale pixel values from [0, 255] to [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess a batch of images; per-call arguments override the
        defaults captured at construction time.

        Returns a BatchFeature with key ``input_ids``: palette indices of
        shape (batch, height*width) when quantizing, otherwise the pixel
        arrays in `data_format`.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        # BUGFIX: np.array(None) yields a 0-d object array, which would defeat
        # the `clusters is None` validation below — only convert a real palette.
        clusters = np.array(clusters) if clusters is not None else None

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        # BUGFIX: `and` binds tighter than `or`; without parentheses the check
        # raised whenever `resample` was None even when do_resize was False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    # Backward-compatible alias: the previous externally visible method binding
    # (last of the three shadowed definitions) resolved to preprocess.
    _SCREAMING_SNAKE_CASE = preprocess
671
1
# Test module for the BERT-Japanese tokenizers (word-level: MeCab / Sudachi /
# Jumanpp; subword-level: WordPiece / character / sentencepiece).
#
# NOTE(review): this file has been machine-obfuscated. Every test method is
# named `_SCREAMING_SNAKE_CASE` (so within a class each later definition
# shadows the earlier ones), class attributes all reuse `_UpperCAmelCase`,
# several annotations (`Tuple`, `Optional`, …) are used without imports, the
# base class `UpperCAmelCase_` is undefined, and locals are assigned under
# `SCREAMING_SNAKE_CASE_` but read under their original names (`tokenizer`,
# `vocab_tokens`, `tokens`, `text`, `ids`, …). The code is reproduced
# token-for-token; only formatting and comments were changed.
import os
import pickle
import unittest

from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
    VOCAB_FILES_NAMES,
    BertJapaneseTokenizer,
    CharacterTokenizer,
    JumanppTokenizer,
    MecabTokenizer,
    SudachiTokenizer,
    WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi

from ...test_tokenization_common import TokenizerTesterMixin


@custom_tokenizers
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
    """simple docstring"""

    # Tokenizer under test and tester-mixin switches (names obfuscated; the
    # repeated `_UpperCAmelCase` means only the last assignment survives).
    _UpperCAmelCase : int = BertJapaneseTokenizer
    _UpperCAmelCase : int = False
    _UpperCAmelCase : Tuple = True

    # setUp: write a small WordPiece vocabulary file into the temp dir.
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
        super().setUp()
        SCREAMING_SNAKE_CASE_: List[str] = [
            "[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは", "世界", "##世界", "、", "##、", "。", "##。",
        ]
        SCREAMING_SNAKE_CASE_: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    # get_input_output_texts: raw sentence and its expected whitespace-split form.
    def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Any):
        SCREAMING_SNAKE_CASE_: Optional[int] = "こんにちは、世界。 \nこんばんは、世界。"
        SCREAMING_SNAKE_CASE_: Optional[Any] = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    # get_clean_sequence: round-trip encode/decode helper used by the mixin.
    def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Tuple):
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = self.get_input_output_texts(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: int = tokenizer.decode(lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__)
        return text, ids

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
        pass  # TODO add if relevant

    def _SCREAMING_SNAKE_CASE ( self : Any):
        pass  # TODO add if relevant

    def _SCREAMING_SNAKE_CASE ( self : List[str]):
        pass  # TODO add if relevant

    # Full tokenizer: default word tokenizer + WordPiece on the tiny vocab.
    def _SCREAMING_SNAKE_CASE ( self : int):
        SCREAMING_SNAKE_CASE_: Optional[int] = self.tokenizer_class(self.vocab_file)
        SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(lowerCAmelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 12, 10, 14, 4, 9, 12, 10, 14])

    # MeCab word tokenizer + pickle round-trip of the tokenizer object.
    def _SCREAMING_SNAKE_CASE ( self : Dict):
        SCREAMING_SNAKE_CASE_: Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab")
        self.assertIsNotNone(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Union[str, Any] = "こんにちは、世界。\nこんばんは、世界。"
        SCREAMING_SNAKE_CASE_: List[str] = tokenizer.tokenize(lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
        SCREAMING_SNAKE_CASE_: List[Any] = os.path.join(self.tmpdirname , "tokenizer.bin")
        with open(lowerCAmelCase__ , "wb") as handle:
            pickle.dump(lowerCAmelCase__ , lowerCAmelCase__)
        with open(lowerCAmelCase__ , "rb") as handle:
            SCREAMING_SNAKE_CASE_: Optional[int] = pickle.load(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer_new.tokenize(lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)

    # MecabTokenizer with the ipadic dictionary.
    def _SCREAMING_SNAKE_CASE ( self : int):
        SCREAMING_SNAKE_CASE_: Optional[Any] = MecabTokenizer(mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    # MecabTokenizer with unidic_lite (skipped when the package is missing).
    def _SCREAMING_SNAKE_CASE ( self : List[str]):
        try:
            SCREAMING_SNAKE_CASE_: int = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    # MecabTokenizer with unidic (skipped when the package is missing).
    def _SCREAMING_SNAKE_CASE ( self : Any):
        try:
            SCREAMING_SNAKE_CASE_: Optional[int] = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    # MecabTokenizer lower-casing behaviour ("iPhone" -> "iphone").
    def _SCREAMING_SNAKE_CASE ( self : List[Any]):
        SCREAMING_SNAKE_CASE_: Tuple = MecabTokenizer(do_lower_case=lowerCAmelCase__ , mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    # MecabTokenizer with an explicit mecab_option pointing at jumandic.
    def _SCREAMING_SNAKE_CASE ( self : str):
        try:
            SCREAMING_SNAKE_CASE_: Any = MecabTokenizer(
                do_lower_case=lowerCAmelCase__ , normalize_text=lowerCAmelCase__ , mecab_option="-d /usr/local/lib/mecab/dic/jumandic")
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )

    # MecabTokenizer with text normalization disabled (keeps the ideographic space).
    def _SCREAMING_SNAKE_CASE ( self : str):
        SCREAMING_SNAKE_CASE_: Any = MecabTokenizer(normalize_text=lowerCAmelCase__ , mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )

    # Sudachi word tokenizer + pickle round-trip.
    @require_sudachi
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
        SCREAMING_SNAKE_CASE_: Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi")
        self.assertIsNotNone(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Optional[Any] = "こんにちは、世界。\nこんばんは、世界。"
        SCREAMING_SNAKE_CASE_: Dict = tokenizer.tokenize(lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
        SCREAMING_SNAKE_CASE_: Any = os.path.join(self.tmpdirname , "tokenizer.bin")
        with open(lowerCAmelCase__ , "wb") as handle:
            pickle.dump(lowerCAmelCase__ , lowerCAmelCase__)
        with open(lowerCAmelCase__ , "rb") as handle:
            SCREAMING_SNAKE_CASE_: List[Any] = pickle.load(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer_new.tokenize(lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)

    # SudachiTokenizer with the core dictionary (whitespace is preserved).
    @require_sudachi
    def _SCREAMING_SNAKE_CASE ( self : Any):
        SCREAMING_SNAKE_CASE_: List[str] = SudachiTokenizer(sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )

    # Sudachi split mode A (shortest units).
    @require_sudachi
    def _SCREAMING_SNAKE_CASE ( self : List[str]):
        SCREAMING_SNAKE_CASE_: Dict = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A")
        self.assertListEqual(tokenizer.tokenize("外国人参政権") , ["外国", "人", "参政", "権"])

    # Sudachi split mode B (middle units).
    @require_sudachi
    def _SCREAMING_SNAKE_CASE ( self : Any):
        SCREAMING_SNAKE_CASE_: List[str] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B")
        self.assertListEqual(tokenizer.tokenize("外国人参政権") , ["外国人", "参政権"])

    # Sudachi split mode C (named-entity-like longest units).
    @require_sudachi
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
        SCREAMING_SNAKE_CASE_: Tuple = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C")
        self.assertListEqual(tokenizer.tokenize("外国人参政権") , ["外国人参政権"])

    # Sudachi lower-casing behaviour.
    @require_sudachi
    def _SCREAMING_SNAKE_CASE ( self : Tuple):
        SCREAMING_SNAKE_CASE_: Optional[int] = SudachiTokenizer(do_lower_case=lowerCAmelCase__ , sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )

    # Sudachi with normalization disabled (keeps the ideographic space \u3000).
    @require_sudachi
    def _SCREAMING_SNAKE_CASE ( self : Any):
        SCREAMING_SNAKE_CASE_: Dict = SudachiTokenizer(normalize_text=lowerCAmelCase__ , sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )

    # Sudachi with whitespace trimming enabled.
    @require_sudachi
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
        SCREAMING_SNAKE_CASE_: Any = SudachiTokenizer(trim_whitespace=lowerCAmelCase__ , sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    # Juman++ word tokenizer + pickle round-trip.
    @require_jumanpp
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
        SCREAMING_SNAKE_CASE_: Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp")
        self.assertIsNotNone(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Any = "こんにちは、世界。\nこんばんは、世界。"
        SCREAMING_SNAKE_CASE_: str = tokenizer.tokenize(lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
        SCREAMING_SNAKE_CASE_: int = os.path.join(self.tmpdirname , "tokenizer.bin")
        with open(lowerCAmelCase__ , "wb") as handle:
            pickle.dump(lowerCAmelCase__ , lowerCAmelCase__)
        with open(lowerCAmelCase__ , "rb") as handle:
            SCREAMING_SNAKE_CASE_: Union[str, Any] = pickle.load(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[str] = tokenizer_new.tokenize(lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)

    # JumanppTokenizer default behaviour (whitespace becomes \u3000).
    @require_jumanpp
    def _SCREAMING_SNAKE_CASE ( self : str):
        SCREAMING_SNAKE_CASE_: int = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    # Juman++ lower-casing behaviour.
    @require_jumanpp
    def _SCREAMING_SNAKE_CASE ( self : List[str]):
        SCREAMING_SNAKE_CASE_: Union[str, Any] = JumanppTokenizer(do_lower_case=lowerCAmelCase__)
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    # Juman++ with normalization disabled (half-width katakana stays split).
    @require_jumanpp
    def _SCREAMING_SNAKE_CASE ( self : Any):
        SCREAMING_SNAKE_CASE_: Dict = JumanppTokenizer(normalize_text=lowerCAmelCase__)
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    # Juman++ with whitespace trimming enabled.
    @require_jumanpp
    def _SCREAMING_SNAKE_CASE ( self : Dict):
        SCREAMING_SNAKE_CASE_: str = JumanppTokenizer(trim_whitespace=lowerCAmelCase__)
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )

    # Juman++ keeps emoticon-like sequences together.
    @require_jumanpp
    def _SCREAMING_SNAKE_CASE ( self : Any):
        SCREAMING_SNAKE_CASE_: Optional[Any] = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。") , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )

    # WordpieceTokenizer in isolation: greedy longest-match-first on the vocab.
    def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
        SCREAMING_SNAKE_CASE_: Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
        SCREAMING_SNAKE_CASE_: Tuple = {}
        for i, token in enumerate(lowerCAmelCase__):
            SCREAMING_SNAKE_CASE_: int = i
        SCREAMING_SNAKE_CASE_: List[str] = WordpieceTokenizer(vocab=lowerCAmelCase__ , unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize("") , [])
        self.assertListEqual(tokenizer.tokenize("こんにちは") , ["こんにちは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは") , ["こん", "##ばんは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは") , ["こん", "##ばんは", "[UNK]", "こんにちは"])

    # Sentencepiece subword tokenizer loaded from the nlp-waseda checkpoint.
    def _SCREAMING_SNAKE_CASE ( self : Dict):
        SCREAMING_SNAKE_CASE_: List[Any] = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.subword_tokenizer
        SCREAMING_SNAKE_CASE_: Tuple = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(lowerCAmelCase__ , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])
        SCREAMING_SNAKE_CASE_: Optional[int] = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(lowerCAmelCase__ , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    # Sequence-builder test against the public cl-tohoku checkpoint.
    def _SCREAMING_SNAKE_CASE ( self : Any):
        SCREAMING_SNAKE_CASE_: List[str] = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")
        SCREAMING_SNAKE_CASE_: Dict = tokenizer.encode("ありがとう。" , add_special_tokens=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: int = tokenizer.encode("どういたしまして。" , add_special_tokens=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[str] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]


# Same tokenizer exercised with the character-level subword tokenizer.
@custom_tokenizers
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
    """simple docstring"""

    _UpperCAmelCase : Dict = BertJapaneseTokenizer
    _UpperCAmelCase : Optional[Any] = False

    # setUp: single-character vocabulary written into the temp dir.
    def _SCREAMING_SNAKE_CASE ( self : Dict):
        super().setUp()
        SCREAMING_SNAKE_CASE_: Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        SCREAMING_SNAKE_CASE_: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    # get_tokenizer: always load with the character subword tokenizer.
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , **lowerCAmelCase__ : str):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Any):
        SCREAMING_SNAKE_CASE_: Tuple = "こんにちは、世界。 \nこんばんは、世界。"
        SCREAMING_SNAKE_CASE_: Optional[Any] = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def _SCREAMING_SNAKE_CASE ( self : Dict):
        pass  # TODO add if relevant

    def _SCREAMING_SNAKE_CASE ( self : Tuple):
        pass  # TODO add if relevant

    def _SCREAMING_SNAKE_CASE ( self : List[str]):
        pass  # TODO add if relevant

    # Full tokenizer with character-level subwords.
    def _SCREAMING_SNAKE_CASE ( self : Dict):
        SCREAMING_SNAKE_CASE_: Tuple = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character")
        SCREAMING_SNAKE_CASE_: Any = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            lowerCAmelCase__ , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])

    # CharacterTokenizer in isolation: out-of-vocab chars map to [UNK].
    def _SCREAMING_SNAKE_CASE ( self : Tuple):
        SCREAMING_SNAKE_CASE_: Any = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        SCREAMING_SNAKE_CASE_: str = {}
        for i, token in enumerate(lowerCAmelCase__):
            SCREAMING_SNAKE_CASE_: List[str] = i
        SCREAMING_SNAKE_CASE_: Optional[int] = CharacterTokenizer(vocab=lowerCAmelCase__ , unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize("") , [])
        self.assertListEqual(tokenizer.tokenize("こんにちは") , ["こ", "ん", "に", "ち", "は"])
        self.assertListEqual(tokenizer.tokenize("こんにちほ") , ["こ", "ん", "に", "ち", "[UNK]"])

    # Sequence-builder test against the char-level public checkpoint.
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
        SCREAMING_SNAKE_CASE_: Optional[int] = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")
        SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.encode("ありがとう。" , add_special_tokens=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: str = tokenizer.encode("どういたしまして。" , add_special_tokens=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[str] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]


# AutoTokenizer must resolve the checkpoint to the Japanese tokenizer class.
@custom_tokenizers
class __lowercase ( unittest.TestCase ):
    """simple docstring"""

    def _SCREAMING_SNAKE_CASE ( self : Any):
        SCREAMING_SNAKE_CASE_: str = "cl-tohoku/bert-base-japanese"
        SCREAMING_SNAKE_CASE_: Dict = AutoTokenizer.from_pretrained(lowerCAmelCase__)
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__)


# Loading a checkpoint with the wrong tokenizer class must emit a warning.
class __lowercase ( unittest.TestCase ):
    """simple docstring"""

    def _SCREAMING_SNAKE_CASE ( self : Tuple):
        SCREAMING_SNAKE_CASE_: Tuple = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers" , level="WARNING") as cm:
            BertTokenizer.from_pretrained(lowerCAmelCase__)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."))
        SCREAMING_SNAKE_CASE_: Tuple = "bert-base-cased"
        with self.assertLogs("transformers" , level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(lowerCAmelCase__)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."))
671
# DPR (Dense Passage Retrieval) tokenizer module: context-encoder,
# question-encoder and reader tokenizers built on top of BertTokenizer.
#
# NOTE(review): this file has been machine-obfuscated. All module constants
# share the name `lowerCAmelCase` (each assignment shadows the previous one),
# the classes are all named `__lowercase` with undefined base `UpperCAmelCase_`,
# `__call__` declares the same parameter name `lowerCAmelCase__` repeatedly,
# and bodies read names (`titles`, `texts`, `questions`, `encoded_inputs`, …)
# that were never assigned under those names. The code is reproduced
# token-for-token; only formatting and comments were changed. The internal
# line breaks of the raw docstring constant below could not be recovered from
# the collapsed source and were re-flowed — TODO confirm against upstream.
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer


lowerCAmelCase : Optional[int] = logging.get_logger(__name__)

lowerCAmelCase : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

# Pretrained vocab/tokenizer file URLs for the context encoder.
lowerCAmelCase : Tuple = {
    """vocab_file""": {
        """facebook/dpr-ctx_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-ctx_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-ctx_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-ctx_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}
# Pretrained vocab/tokenizer file URLs for the question encoder.
lowerCAmelCase : Union[str, Any] = {
    """vocab_file""": {
        """facebook/dpr-question_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-question_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-question_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-question_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}
# Pretrained vocab/tokenizer file URLs for the reader.
lowerCAmelCase : List[str] = {
    """vocab_file""": {
        """facebook/dpr-reader-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-reader-multiset-base""": (
            """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-reader-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-reader-multiset-base""": (
            """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}

# Maximum positional-embedding sizes per checkpoint.
lowerCAmelCase : int = {
    """facebook/dpr-ctx_encoder-single-nq-base""": 512,
    """facebook/dpr-ctx_encoder-multiset-base""": 512,
}
lowerCAmelCase : int = {
    """facebook/dpr-question_encoder-single-nq-base""": 512,
    """facebook/dpr-question_encoder-multiset-base""": 512,
}
lowerCAmelCase : List[Any] = {
    """facebook/dpr-reader-single-nq-base""": 512,
    """facebook/dpr-reader-multiset-base""": 512,
}

# Default init kwargs per checkpoint.
lowerCAmelCase : Optional[int] = {
    """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCAmelCase : Optional[int] = {
    """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCAmelCase : List[str] = {
    """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}


# Context-encoder tokenizer: plain BertTokenizer with DPR pretrained maps.
class __lowercase ( UpperCAmelCase_ ):
    """simple docstring"""

    _UpperCAmelCase : Any = VOCAB_FILES_NAMES
    _UpperCAmelCase : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase : List[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase : List[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


# Question-encoder tokenizer: plain BertTokenizer with DPR pretrained maps.
class __lowercase ( UpperCAmelCase_ ):
    """simple docstring"""

    _UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
    _UpperCAmelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase : Any = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase : str = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


# Output records used by the reader post-processing below.
lowerCAmelCase : List[Any] = collections.namedtuple(
    """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)

lowerCAmelCase : Optional[Any] = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])


lowerCAmelCase : int = R"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
            required by one of the truncation/padding parameters. If the model has no specific maximum input length
            (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


# Mixin implementing the reader-specific encoding and span decoding.
@add_start_docstrings(UpperCAmelCase_ )
class __lowercase :
    """simple docstring"""

    # Encode (question, title, text) triples into [CLS] q [SEP] title [SEP] text.
    # NOTE(review): the obfuscation collapsed all parameter names to
    # `lowerCAmelCase__`; the body reads the original names (questions, titles,
    # texts, padding, truncation, max_length, return_tensors,
    # return_attention_mask).
    def __call__( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Union[bool, str] = False , lowerCAmelCase__ : Union[bool, str] = False , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Optional[bool] = None , **lowerCAmelCase__ : Tuple , ):
        # No passages at all: behave like the plain Bert tokenizer.
        if titles is None and texts is None:
            return super().__call__(
                lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
        # Only one of titles/texts given: encode it as the second segment.
        elif titles is None or texts is None:
            SCREAMING_SNAKE_CASE_: List[str] = titles if texts is None else texts
            return super().__call__(
                lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
        SCREAMING_SNAKE_CASE_: Optional[int] = titles if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [titles]
        SCREAMING_SNAKE_CASE_: int = texts if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [texts]
        SCREAMING_SNAKE_CASE_: str = len(lowerCAmelCase__)
        # A single question is broadcast across all passages.
        SCREAMING_SNAKE_CASE_: Tuple = questions if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [questions] * n_passages
        if len(lowerCAmelCase__) != len(lowerCAmelCase__):
            raise ValueError(
                F"There should be as many titles than texts but got {len(lowerCAmelCase__)} titles and {len(lowerCAmelCase__)} texts.")
        # Encode question+title as a pair, text separately, then concatenate ids.
        SCREAMING_SNAKE_CASE_: Optional[Any] = super().__call__(lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__)["input_ids"]
        SCREAMING_SNAKE_CASE_: Union[str, Any] = super().__call__(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__)["input_ids"]
        SCREAMING_SNAKE_CASE_: int = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(lowerCAmelCase__ , lowerCAmelCase__)
            ]
        }
        # Build an attention mask that zeroes out padding positions.
        if return_attention_mask is not False:
            SCREAMING_SNAKE_CASE_: Dict = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            SCREAMING_SNAKE_CASE_: int = attention_mask
        return self.pad(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__)

    # decode_best_spans: pick the top answer spans across the highest-relevance
    # passages, returning up to `num_spans` DPRSpanPrediction records.
    def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : BatchEncoding , lowerCAmelCase__ : DPRReaderOutput , lowerCAmelCase__ : int = 16 , lowerCAmelCase__ : int = 64 , lowerCAmelCase__ : int = 4 , ):
        SCREAMING_SNAKE_CASE_: int = reader_input["input_ids"]
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = reader_output[:3]
        SCREAMING_SNAKE_CASE_: Tuple = len(lowerCAmelCase__)
        # Visit passages in decreasing relevance order.
        SCREAMING_SNAKE_CASE_: Union[str, Any] = sorted(range(lowerCAmelCase__) , reverse=lowerCAmelCase__ , key=relevance_logits.__getitem__)
        SCREAMING_SNAKE_CASE_: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            SCREAMING_SNAKE_CASE_: Optional[int] = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            SCREAMING_SNAKE_CASE_: str = sequence_ids.index(self.sep_token_id , 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                SCREAMING_SNAKE_CASE_: List[Any] = sequence_ids.index(self.pad_token_id)
            else:
                SCREAMING_SNAKE_CASE_: Dict = len(lowerCAmelCase__)
            SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase__ , top_spans=lowerCAmelCase__ , )
            for start_index, end_index in best_spans:
                # Shift span indices from passage-local back to full-sequence.
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase__ , start_index=lowerCAmelCase__ , end_index=lowerCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
            if len(lowerCAmelCase__) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    # _get_best_spans: enumerate candidate (start, end) spans, score them by
    # start_logit + end_logit, and keep the top non-overlapping ones.
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , ):
        SCREAMING_SNAKE_CASE_: Any = []
        for start_index, start_score in enumerate(lowerCAmelCase__):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        SCREAMING_SNAKE_CASE_: Union[str, Any] = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__: x[1] , reverse=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[str] = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(F"Wrong span indices: [{start_index}:{end_index}]")
            SCREAMING_SNAKE_CASE_: int = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(F"Span is too long: {length} > {max_answer_length}")
            # Skip spans overlapping an already-chosen interval.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(lowerCAmelCase__) == top_spans:
                break
        return chosen_span_intervals


# Reader tokenizer: BertTokenizer + the reader mixin above.
@add_end_docstrings(UpperCAmelCase_ )
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ ):
    """simple docstring"""

    _UpperCAmelCase : Any = VOCAB_FILES_NAMES
    _UpperCAmelCase : Optional[Any] = READER_PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase : int = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION
    _UpperCAmelCase : str = ['''input_ids''', '''attention_mask''']
671
1
import heapq
import sys

import numpy as np

# A grid position is an (x, y) pair of integer coordinates.
TPos = tuple[int, int]


class PriorityQueue:
    """Min-priority queue over unique items, backed by ``heapq``.

    A companion ``set`` tracks membership so ``put`` can update the
    priority of an item that is already queued.
    """

    def __init__(self):
        self.elements = []  # heap of (priority, item) pairs
        self.set = set()  # items currently queued, for O(1) membership tests

    def minkey(self):
        """Return the smallest priority, or +inf when the queue is empty."""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        """Insert *item*, or replace its priority if it is already queued."""
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # Update: pop entries until *item* is found, drop its old
            # priority, then push everything back with the new one.
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        """Remove *item* from the queue if present (no-op otherwise)."""
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        """Return the item with the smallest priority without removing it."""
        return self.elements[0][1]

    def get(self):
        """Pop and return the (priority, item) pair with the smallest priority."""
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)


def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos):
    # Integer division by the (growing) time variable ``t`` makes this
    # heuristic inconsistent: its value changes as the search proceeds.
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    """Priority of *start* in queue *i*: g(start) + W1 * h_i(start, goal)."""
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans


def do_something(back_pointer, goal, start):
    """Render the grid with the discovered path, print the path, then exit."""
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            # Grid rows are printed top-down, so row i maps to y = (n-1) - i.
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    """Return True when *p* lies inside the n x n grid."""
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    """Expand state *s* taken from queue *j* and relax its 4-neighbours."""
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )


def make_common_ground():
    """Build the obstacle layout used by the demo grid."""
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list


heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1


def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    """Multi-heuristic A* search from *start* to *goal*.

    Queue 0 is the anchor search driven by the consistent heuristic; the
    remaining queues use inadmissible heuristics but are only expanded while
    their best key stays within W2 times the anchor's best key.
    """
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)

    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
671
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    """Tokenizer tests for DistilBERT; inherits the full BERT tokenizer suite."""

    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        """Check single- and pair-sequence special-token layout: [CLS] A [SEP] (B [SEP])."""
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        # Encode without special tokens so we can assert their placement explicitly.
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
671
1
"""TER (Translation Edit Rate) metric, wrapping the sacrebleu implementation."""

import sacrebleu as scb
from packaging import version
from sacrebleu import TER

import datasets


_CITATION = """\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""

_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.

Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.

Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length

Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?",
        ...                     "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}

    Example 2:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}

    Example 3:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         normalized=True,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}

    Example 4:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}

    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                    "what about this sentence?",
        ...                    "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    """Translation Edit Rate metric backed by sacrebleu's TER scorer."""

    def _info(self):
        # sacrebleu grew the TER API in 1.4.12; refuse to run with older versions.
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        """Compute TER.

        Returns a dict with 'score' (num_edits / sum_ref_lengths * 100),
        'num_edits' and 'ref_length'. Raises ValueError when predictions do
        not all have the same number of references.
        """
        # Every prediction must carry the same number of references, because
        # sacrebleu expects the references transposed: one stream per index.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
671
"""Collect doc-test results from CI artifacts and post a summary to Slack."""

import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict

import requests
from slack_sdk import WebClient


client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])


def handle_test_results(test_results):
    """Parse a pytest summary line into (n_failed, n_passed, time_spent)."""
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    """Map each failing doctest file to the first line of its error message."""
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures


class Message:
    """Builds and posts the Slack message summarizing the doc-test run."""

    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

        # Set by `post`; `post_reply` refuses to run until then.
        self.thread_ts = None

    @property
    def time(self) -> str:
        """Total run time formatted as XhYmZs."""
        time_spent = [self._time_spent]
        total_secs = 0

        for duration in time_spent:
            time_parts = duration.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        """One mrkdwn section listing the failed tests grouped by category."""
        line_length = 40
        # NOTE: reads the module-level `doc_test_results`, mirroring how the
        # script populates it in the __main__ block below.
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        """Post a generic failure notice when the run itself broke."""
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        # `payload` is already a Python list, so dump it directly
        # (calling json.loads on it would raise TypeError).
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        """Post the summary message and remember its thread timestamp."""
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        """Build the per-job reply blocks: header, link button, failure list."""
        failures_text = ""
        for key, value in failures.items():
            # Slack blocks cap out quickly; truncate long error messages.
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        """Post one threaded reply per job with its failure details."""
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)


def get_job_links():
    """Return {job name: html url} for all jobs of the current workflow run."""
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # The API pages at 100 jobs; fetch the remaining pages if any.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    """Read every text file in artifact directory *name* into a dict keyed by stem."""
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    """Scan the working directory and index every artifact directory found."""

    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts


if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
671
1
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    """Fast custom tokenizer; pairs with the slow `CustomTokenizer` for conversion."""

    # Slow counterpart used when converting from / falling back to the Python tokenizer.
    slow_tokenizer_class = CustomTokenizer
671
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase : str = 16 lowerCAmelCase : List[Any] = 32 def A_ ( _UpperCAmelCase , _UpperCAmelCase = 16 ): SCREAMING_SNAKE_CASE_: List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" ) SCREAMING_SNAKE_CASE_: Tuple = load_dataset("glue" , "mrpc" ) def tokenize_function(_UpperCAmelCase ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE_: List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE_: str = datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE_: 
Optional[Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(_UpperCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE_: List[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE_: Tuple = 16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE_: int = 8 else: SCREAMING_SNAKE_CASE_: Any = None return tokenizer.pad( _UpperCAmelCase , padding="longest" , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors="pt" , ) # Instantiate dataloaders. SCREAMING_SNAKE_CASE_: Optional[Any] = DataLoader( tokenized_datasets["train"] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Tuple = DataLoader( tokenized_datasets["validation"] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCAmelCase : Optional[int] = mocked_dataloaders # noqa: F811 def A_ ( _UpperCAmelCase , _UpperCAmelCase ): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS" , _UpperCAmelCase ) == "1": SCREAMING_SNAKE_CASE_: Tuple = 2 # New Code # SCREAMING_SNAKE_CASE_: List[str] = int(args.gradient_accumulation_steps ) # Initialize accelerator SCREAMING_SNAKE_CASE_: int = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_UpperCAmelCase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. 
Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE_: Tuple = config["lr"] SCREAMING_SNAKE_CASE_: List[str] = int(config["num_epochs"] ) SCREAMING_SNAKE_CASE_: List[str] = int(config["seed"] ) SCREAMING_SNAKE_CASE_: Optional[int] = int(config["batch_size"] ) SCREAMING_SNAKE_CASE_: str = evaluate.load("glue" , "mrpc" ) set_seed(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE_: Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_UpperCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE_: List[Any] = model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE_: Union[str, Any] = AdamW(params=model.parameters() , lr=_UpperCAmelCase ) # Instantiate scheduler SCREAMING_SNAKE_CASE_: str = get_linear_schedule_with_warmup( optimizer=_UpperCAmelCase , num_warmup_steps=1_00 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = accelerator.prepare( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Now we train the model for epoch in range(_UpperCAmelCase ): model.train() for step, batch in enumerate(_UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[Any] = model(**_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[Any] = output.loss accelerator.backward(_UpperCAmelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE_: Optional[Any] = model(**_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[Any] = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=_UpperCAmelCase , references=_UpperCAmelCase , ) SCREAMING_SNAKE_CASE_: List[str] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:" , _UpperCAmelCase ) def A_ ( ): SCREAMING_SNAKE_CASE_: str = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." 
"and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=_UpperCAmelCase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) SCREAMING_SNAKE_CASE_: List[Any] = parser.parse_args() SCREAMING_SNAKE_CASE_: Tuple = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(_UpperCAmelCase , _UpperCAmelCase ) if __name__ == "__main__": main()
671
1
import argparse import os import re import packaging.version lowerCAmelCase : int = """examples/""" lowerCAmelCase : Dict = { """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""), """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } lowerCAmelCase : Optional[int] = { """init""": """src/diffusers/__init__.py""", """setup""": """setup.py""", } lowerCAmelCase : Any = """README.md""" def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): with open(_UpperCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f: SCREAMING_SNAKE_CASE_: List[Any] = f.read() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = REPLACE_PATTERNS[pattern] SCREAMING_SNAKE_CASE_: Optional[Any] = replace.replace("VERSION" , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] = re_pattern.sub(_UpperCAmelCase , _UpperCAmelCase ) with open(_UpperCAmelCase , "w" , encoding="utf-8" , newline="\n" ) as f: f.write(_UpperCAmelCase ) def A_ ( _UpperCAmelCase ): for folder, directories, fnames in os.walk(_UpperCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("research_projects" ) if "legacy" in directories: directories.remove("legacy" ) for fname in fnames: if fname.endswith(".py" ): update_version_in_file(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase , pattern="examples" ) def A_ ( _UpperCAmelCase , _UpperCAmelCase=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if not patch: update_version_in_examples(_UpperCAmelCase ) def A_ ( ): 
SCREAMING_SNAKE_CASE_: List[str] = "🤗 Transformers currently provides the following architectures" SCREAMING_SNAKE_CASE_: List[Any] = "1. Want to contribute a new model?" with open(_UpperCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f: SCREAMING_SNAKE_CASE_: Dict = f.readlines() # Find the start of the list. SCREAMING_SNAKE_CASE_: Optional[int] = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 SCREAMING_SNAKE_CASE_: List[str] = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("1." ): SCREAMING_SNAKE_CASE_: List[str] = lines[index].replace( "https://huggingface.co/docs/diffusers/main/model_doc" , "https://huggingface.co/docs/diffusers/model_doc" , ) index += 1 with open(_UpperCAmelCase , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(_UpperCAmelCase ) def A_ ( ): with open(REPLACE_FILES["init"] , "r" ) as f: SCREAMING_SNAKE_CASE_: Optional[int] = f.read() SCREAMING_SNAKE_CASE_: List[Any] = REPLACE_PATTERNS["init"][0].search(_UpperCAmelCase ).groups()[0] return packaging.version.parse(_UpperCAmelCase ) def A_ ( _UpperCAmelCase=False ): SCREAMING_SNAKE_CASE_: Optional[int] = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" ) if default_version.is_devrelease: SCREAMING_SNAKE_CASE_: Tuple = default_version.base_version elif patch: SCREAMING_SNAKE_CASE_: Tuple = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}" else: SCREAMING_SNAKE_CASE_: int = f"{default_version.major}.{default_version.minor + 1}.0" # Now let's ask nicely if that's the right one. SCREAMING_SNAKE_CASE_: Dict = input(f"Which version are you releasing? [{default_version}]" ) if len(_UpperCAmelCase ) == 0: SCREAMING_SNAKE_CASE_: Tuple = default_version print(f"Updating version to {version}." 
) global_version_update(_UpperCAmelCase , patch=_UpperCAmelCase ) def A_ ( ): SCREAMING_SNAKE_CASE_: Dict = get_version() SCREAMING_SNAKE_CASE_: List[str] = f"{current_version.major}.{current_version.minor + 1}.0.dev0" SCREAMING_SNAKE_CASE_: Union[str, Any] = current_version.base_version # Check with the user we got that right. SCREAMING_SNAKE_CASE_: List[Any] = input(f"Which version are we developing now? [{dev_version}]" ) if len(_UpperCAmelCase ) == 0: SCREAMING_SNAKE_CASE_: List[str] = dev_version print(f"Updating version to {version}." ) global_version_update(_UpperCAmelCase ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": lowerCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") lowerCAmelCase : Optional[int] = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
671
from math import asin, atan, cos, radians, sin, sqrt, tan

# WGS-84 ellipsoid constants: equatorial radius (m), polar radius (m), and the
# mean radius used by the spherical haversine approximation (m).
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def A_(lat1, lon1, lat2, lon2):
    """Return the haversine distance in metres between two points on Earth.

    Args:
        lat1, lon1: Latitude/longitude of the first point, in degrees.
        lat2, lon2: Latitude/longitude of the second point, in degrees.

    Returns:
        Great-circle distance in metres.

    The geodetic latitudes are first corrected for the flattening of the
    WGS-84 ellipsoid (reduced latitude) before applying the spherical
    haversine formula.

    >>> A_(0.0, 0.0, 0.0, 0.0)
    0.0
    """
    # Flattening of the ellipsoid: f = (a - b) / a.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Haversine: h = sin^2(dphi/2) + cos(phi1) * cos(phi2) * sin^2(dlambda/2)
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both half-angle sines.
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
1
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable

# Base import structure; torch-dependent modeling entries are appended below
# only when torch is installed.
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only config/tokenizer symbols.
    pass
else:
    # Register the modeling module so _LazyModule can resolve these names.
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint to a PyTorch ``BertForPreTraining`` state dict.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        bert_config_file: JSON file describing the model architecture.
        pytorch_dump_path: Destination path for the converted ``state_dict``.
    """
    # Initialise PyTorch model from the JSON architecture description.
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from the TF checkpoint into the freshly built model.
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


# Backward-compatible alias for the obfuscated name.
A_ = convert_tf_checkpoint_to_pytorch


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
671
1
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class __lowercase ( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" _UpperCAmelCase : Tuple = BarthezTokenizer _UpperCAmelCase : Optional[Any] = BarthezTokenizerFast _UpperCAmelCase : Tuple = True _UpperCAmelCase : Optional[Any] = True def _SCREAMING_SNAKE_CASE ( self : Optional[int]): super().setUp() SCREAMING_SNAKE_CASE_: Dict = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez") tokenizer.save_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = tokenizer def _SCREAMING_SNAKE_CASE ( self : int): SCREAMING_SNAKE_CASE_: str = "<pad>" SCREAMING_SNAKE_CASE_: Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : List[Any]): SCREAMING_SNAKE_CASE_: Dict = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , "<s>") self.assertEqual(vocab_keys[1] , "<pad>") self.assertEqual(vocab_keys[-1] , "<mask>") self.assertEqual(len(lowerCAmelCase__) , 10_1122) def _SCREAMING_SNAKE_CASE ( self : Dict): self.assertEqual(self.get_tokenizer().vocab_size , 10_1122) @require_torch def _SCREAMING_SNAKE_CASE ( self : List[str]): SCREAMING_SNAKE_CASE_: List[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."] SCREAMING_SNAKE_CASE_: Union[str, Any] = [0, 57, 3018, 7_0307, 91, 2] SCREAMING_SNAKE_CASE_: int = self.tokenizer( lowerCAmelCase__ , max_length=len(lowerCAmelCase__) , 
padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt") self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__) self.assertEqual((2, 6) , batch.input_ids.shape) self.assertEqual((2, 6) , batch.attention_mask.shape) SCREAMING_SNAKE_CASE_: Tuple = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : List[str]): if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE_: List[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE_: List[str] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE_: Union[str, Any] = "I was born in 92000, and this is falsé." SCREAMING_SNAKE_CASE_: str = tokenizer.tokenize(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = rust_tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Any = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.encode(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int]): # fmt: off SCREAMING_SNAKE_CASE_: Optional[Any] = {"input_ids": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 
4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. SCREAMING_SNAKE_CASE_: Tuple = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=lowerCAmelCase__ , )
671
import math


def is_prime(number):
    """Return True iff ``number`` is prime (trial division over 6k +/- 1).

    >>> is_prime(13)
    True
    """
    if 1 < number < 4:
        # 2 and 3 are primes.
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes.
        return False
    # All remaining candidates have divisors only of the form 6k +/- 1.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def A_(ratio=0.1):
    """Project Euler 58: smallest side length of the number spiral for which
    the ratio of primes along both diagonals first falls below ``ratio``.

    Args:
        ratio: Target prime ratio (fraction), default 0.1 (10%).

    Returns:
        Odd spiral side length (int).

    >>> A_(0.5)
    11
    """
    primes = 3  # 3, 5 and 7 on the diagonals of the side-3 spiral.
    j = 3
    while primes / (2 * j - 1) >= ratio:
        # The three non-square corners of the next spiral layer; the fourth
        # corner, (j + 2)^2, is a perfect square and never prime.
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
1
import argparse import os import re lowerCAmelCase : List[Any] = """src/transformers""" # Pattern that looks at the indentation in a line. lowerCAmelCase : Any = re.compile(R"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. lowerCAmelCase : Tuple = re.compile(R"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowerCAmelCase : Optional[int] = re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. lowerCAmelCase : List[Any] = re.compile(R"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowerCAmelCase : Dict = re.compile(R"""\[([^\]]+)\]""") def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = _re_indent.search(_UpperCAmelCase ) return "" if search is None else search.groups()[0] def A_ ( _UpperCAmelCase , _UpperCAmelCase="" , _UpperCAmelCase=None , _UpperCAmelCase=None ): SCREAMING_SNAKE_CASE_: Optional[int] = 0 SCREAMING_SNAKE_CASE_: Optional[int] = code.split("\n" ) if start_prompt is not None: while not lines[index].startswith(_UpperCAmelCase ): index += 1 SCREAMING_SNAKE_CASE_: Union[str, Any] = ["\n".join(lines[:index] )] else: SCREAMING_SNAKE_CASE_: Union[str, Any] = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). 
SCREAMING_SNAKE_CASE_: List[Any] = [lines[index]] index += 1 while index < len(_UpperCAmelCase ) and (end_prompt is None or not lines[index].startswith(_UpperCAmelCase )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_UpperCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ): current_block.append(lines[index] ) blocks.append("\n".join(_UpperCAmelCase ) ) if index < len(_UpperCAmelCase ) - 1: SCREAMING_SNAKE_CASE_: str = [lines[index + 1]] index += 1 else: SCREAMING_SNAKE_CASE_: Optional[Any] = [] else: blocks.append("\n".join(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE_: Tuple = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_UpperCAmelCase ) > 0: blocks.append("\n".join(_UpperCAmelCase ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_UpperCAmelCase ): blocks.append("\n".join(lines[index:] ) ) return blocks def A_ ( _UpperCAmelCase ): def _inner(_UpperCAmelCase ): return key(_UpperCAmelCase ).lower().replace("_" , "" ) return _inner def A_ ( _UpperCAmelCase , _UpperCAmelCase=None ): # If no key is provided, we use a noop. def noop(_UpperCAmelCase ): return x if key is None: SCREAMING_SNAKE_CASE_: Any = noop # Constants are all uppercase, they go first. SCREAMING_SNAKE_CASE_: Union[str, Any] = [obj for obj in objects if key(_UpperCAmelCase ).isupper()] # Classes are not all uppercase but start with a capital, they go second. SCREAMING_SNAKE_CASE_: Tuple = [obj for obj in objects if key(_UpperCAmelCase )[0].isupper() and not key(_UpperCAmelCase ).isupper()] # Functions begin with a lowercase, they go last. 
SCREAMING_SNAKE_CASE_: Optional[Any] = [obj for obj in objects if not key(_UpperCAmelCase )[0].isupper()] SCREAMING_SNAKE_CASE_: Dict = ignore_underscore(_UpperCAmelCase ) return sorted(_UpperCAmelCase , key=_UpperCAmelCase ) + sorted(_UpperCAmelCase , key=_UpperCAmelCase ) + sorted(_UpperCAmelCase , key=_UpperCAmelCase ) def A_ ( _UpperCAmelCase ): # This inner function sort imports between [ ]. def _replace(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = match.groups()[0] if "," not in imports: return f"[{imports}]" SCREAMING_SNAKE_CASE_: List[Any] = [part.strip().replace("\"" , "" ) for part in imports.split("," )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: SCREAMING_SNAKE_CASE_: int = keys[:-1] return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(_UpperCAmelCase )] ) + "]" SCREAMING_SNAKE_CASE_: Any = import_statement.split("\n" ) if len(_UpperCAmelCase ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. SCREAMING_SNAKE_CASE_: str = 2 if lines[1].strip() == "[" else 1 SCREAMING_SNAKE_CASE_: str = [(i, _re_strip_line.search(_UpperCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] SCREAMING_SNAKE_CASE_: Tuple = sort_objects(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] ) SCREAMING_SNAKE_CASE_: str = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_UpperCAmelCase ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... 
# ] if _re_bracket_content.search(lines[1] ) is not None: SCREAMING_SNAKE_CASE_: Any = _re_bracket_content.sub(_replace , lines[1] ) else: SCREAMING_SNAKE_CASE_: Union[str, Any] = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: SCREAMING_SNAKE_CASE_: int = keys[:-1] SCREAMING_SNAKE_CASE_: str = get_indent(lines[1] ) + ", ".join([f"\"{k}\"" for k in sort_objects(_UpperCAmelCase )] ) return "\n".join(_UpperCAmelCase ) else: # Finally we have to deal with imports fitting on one line SCREAMING_SNAKE_CASE_: str = _re_bracket_content.sub(_replace , _UpperCAmelCase ) return import_statement def A_ ( _UpperCAmelCase , _UpperCAmelCase=True ): with open(_UpperCAmelCase , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE_: Optional[int] = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 SCREAMING_SNAKE_CASE_: Any = split_code_in_indented_blocks( _UpperCAmelCase , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_UpperCAmelCase ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. SCREAMING_SNAKE_CASE_: List[str] = main_blocks[block_idx] SCREAMING_SNAKE_CASE_: Dict = block.split("\n" ) # Get to the start of the imports. SCREAMING_SNAKE_CASE_: Union[str, Any] = 0 while line_idx < len(_UpperCAmelCase ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: SCREAMING_SNAKE_CASE_: Optional[int] = len(_UpperCAmelCase ) else: line_idx += 1 if line_idx >= len(_UpperCAmelCase ): continue # Ignore beginning and last line: they don't contain anything. 
SCREAMING_SNAKE_CASE_: List[Any] = "\n".join(block_lines[line_idx:-1] ) SCREAMING_SNAKE_CASE_: Optional[int] = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. SCREAMING_SNAKE_CASE_: Tuple = split_code_in_indented_blocks(_UpperCAmelCase , indent_level=_UpperCAmelCase ) # We have two categories of import key: list or _import_structure[key].append/extend SCREAMING_SNAKE_CASE_: str = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. SCREAMING_SNAKE_CASE_: str = [(pattern.search(_UpperCAmelCase ).groups()[0] if pattern.search(_UpperCAmelCase ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. SCREAMING_SNAKE_CASE_: Tuple = [(i, key) for i, key in enumerate(_UpperCAmelCase ) if key is not None] SCREAMING_SNAKE_CASE_: Optional[Any] = [x[0] for x in sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. SCREAMING_SNAKE_CASE_: Any = 0 SCREAMING_SNAKE_CASE_: Any = [] for i in range(len(_UpperCAmelCase ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: SCREAMING_SNAKE_CASE_: str = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(_UpperCAmelCase ) count += 1 # And we put our main block back together with its first and last line. SCREAMING_SNAKE_CASE_: Union[str, Any] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(_UpperCAmelCase ): if check_only: return True else: print(f"Overwriting {file}." 
) with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as f: f.write("\n".join(_UpperCAmelCase ) ) def A_ ( _UpperCAmelCase=True ): SCREAMING_SNAKE_CASE_: List[str] = [] for root, _, files in os.walk(_UpperCAmelCase ): if "__init__.py" in files: SCREAMING_SNAKE_CASE_: List[str] = sort_imports(os.path.join(_UpperCAmelCase , "__init__.py" ) , check_only=_UpperCAmelCase ) if result: SCREAMING_SNAKE_CASE_: int = [os.path.join(_UpperCAmelCase , "__init__.py" )] if len(_UpperCAmelCase ) > 0: raise ValueError(f"Would overwrite {len(_UpperCAmelCase )} files, run `make style`." ) if __name__ == "__main__": lowerCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") lowerCAmelCase : List[str] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
671
import re


def split_input(str_: str) -> list:
    """Split `str_` on every non-alphanumeric character, then break each
    fragment into its whitespace-separated words.

    Returns a list of word-lists, one per fragment.
    """
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    """PascalCase: capitalize every word and concatenate without separators."""
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join the words of each fragment with `separator`, fully upper- or
    lower-casing every word depending on `upper`.

    Returns the literal string "not valid string" on an IndexError, matching
    the original contract.
    """
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    """PascalCase alias for :func:`to_simple_case`."""
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    """camelCase: PascalCase with the first character lower-cased."""
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    """snake_case (or SCREAMING_SNAKE_CASE when `upper` is True)."""
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    """kebab-case (or SCREAMING-KEBAB-CASE when `upper` is True)."""
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
671
1
def A_(_UpperCAmelCase):
    """Return the first n terms of the harmonic series as strings.

    Args:
        _UpperCAmelCase: the number of terms, as an int or numeric string.
            An empty string yields an empty list.

    Returns:
        ["1", "1/2", "1/3", ...] with one entry per term.
    """
    if _UpperCAmelCase == "":
        return []
    series: list = []
    # Fix: the loop and the guard previously referenced an undefined
    # `n_term`; both now use the parameter.
    for temp in range(int(_UpperCAmelCase)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    # Fix: the guard called undefined `harmonic_series`; call the
    # function defined above.  (The bogus `Any` annotation is dropped —
    # `typing` is not imported in this file.)
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(A_(nth_term))
671
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


# Fix: the class body uses `logger`, but the original bound the logger to a
# different (annotated) name whose annotation was itself unresolved.
logger = logging.get_logger(__name__)


class __lowercase(PretrainedConfig):
    """Configuration for a UPerNet-style semantic-segmentation model.

    Holds the backbone sub-config plus the decode-head / auxiliary-head
    hyperparameters.  Fixes applied: the base class is `PretrainedConfig`
    (the previous base name was undefined), `__init__` parameters are given
    distinct names (duplicates were a SyntaxError), the dict check uses
    `isinstance(backbone_config, dict)` (`isinstance(x, x)` is a TypeError),
    and `to_dict` actually builds and returns its output dict.
    """

    # `model_type` is the key PretrainedConfig serialization relies on.
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],  # noqa: B006 — kept for interface compatibility; only read, never mutated here
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Re-hydrate a serialized backbone config through its registered
            # config class.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, including the nested backbone config
        and this config's `model_type`."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
671
1
from collections import Counter from timeit import timeit def A_ ( _UpperCAmelCase = "" , ): return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2 def A_ ( _UpperCAmelCase = "" ): if len(_UpperCAmelCase ) == 0: return True SCREAMING_SNAKE_CASE_: Tuple = input_str.replace(" " , "" ).lower() # character_freq_dict: Stores the frequency of every character in the input string SCREAMING_SNAKE_CASE_: dict[str, int] = {} for character in lower_case_input_str: SCREAMING_SNAKE_CASE_: Dict = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1 SCREAMING_SNAKE_CASE_: Tuple = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def A_ ( _UpperCAmelCase = "" ): print("\nFor string = " , _UpperCAmelCase , ":" ) print( "> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , "\ttime =" , timeit( "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , ) print( "> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , "\ttime =" , timeit( "z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , ) if __name__ == "__main__": lowerCAmelCase : int = input( """Enter string to determine if it can be rearranged as a palindrome or not: """ ).strip() benchmark(check_str) lowerCAmelCase : str = can_string_be_rearranged_as_palindrome_counter(check_str) print(f'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
671
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class __lowercase(unittest.TestCase):
    """CPU-only regression test: an optimizer wrapped by
    `Accelerator.prepare` must survive a pickle round trip."""

    # NOTE(review): this name does not start with `test_`, so unittest will
    # not discover it; the upstream test presumably had a `test_*` name —
    # confirm before relying on discovery.
    def _SCREAMING_SNAKE_CASE(self):
        """Prepare an SGD optimizer and assert pickling it does not raise."""
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        # Fix: the original passed an undefined name to `prepare` and
        # `pickle.dumps`; the object under test is the freshly created
        # optimizer, re-bound to its prepared wrapper.
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        # Reset global process state so later tests start from a clean slate.
        AcceleratorState._reset_state()
671
1
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True iff `number` (an int >= 0) is prime (trial division)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: all primes from 2 up to n (n > 2)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes: zero out every multiple.
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int) -> list:
    """All primes from 2 up to n via repeated primality tests (n > 2)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int) -> list:
    """Prime factorization of `number` as a list with multiplicity."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    """Largest prime factor of `number` (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """Smallest prime factor of `number` (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    """True iff `number` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    """True iff `number` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0


def goldbach(number: int) -> list:
    """Goldbach decomposition: two primes summing to even `number` > 2."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(numbera: int, numberb: int) -> int:
    """Greatest common divisor via the Euclidean algorithm (inputs >= 0)."""
    assert (
        isinstance(numbera, int)
        and isinstance(numberb, int)
        and (numbera >= 0)
        and (numberb >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while numberb != 0:
        rest = numbera % numberb
        numbera = numberb
        numberb = rest
    # precondition
    assert isinstance(numbera, int) and (
        numbera >= 0
    ), "'number' must been from type int and positive"
    return numbera


def kg_v(numbera: int, numberb: int) -> int:
    """Least common multiple ("kleinstes gemeinsames Vielfaches"), inputs >= 1."""
    assert (
        isinstance(numbera, int)
        and isinstance(numberb, int)
        and (numbera >= 1)
        and (numberb >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if numbera > 1 and numberb > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(numbera)
        prime_fac_b = prime_factorization(numberb)
    elif numbera == 1 or numberb == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(numbera, numberb)
    count_a = 0
    count_b = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n)
                count_b = prime_fac_b.count(n)
                for _ in range(max(count_a, count_b)):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n)
                for _ in range(count_a):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n)
            for _ in range(count_b):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n: int) -> int:
    """The n-th prime, 0-indexed: get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_a: int, p_number_b: int) -> list:
    """All primes strictly between the primes p_number_a < p_number_b."""
    assert (
        is_prime(p_number_a) and is_prime(p_number_b) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_b:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_a
        and ans[len(ans) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """All positive divisors of n (including 1 and n), n >= 1."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    """True iff `number` equals the sum of its proper divisors (number > 1)."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduce numerator/denominator by their gcd; denominator != 0."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n: int) -> int:
    """Fibonacci-like iteration starting at 1: fib(1) == 1, fib(2) == 2, ..."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fiba = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fiba
        fiba = tmp
    return ans
671
from itertools import count


def A_(_UpperCAmelCase=50):
    """Project Euler 115: least row length n for which the fill-count
    function F(m, n) first exceeds one million.

    Args:
        _UpperCAmelCase: m, the minimum block length (default 50).

    fill_count_functions[n] counts the ways to fill a row of length n with
    blocks of length >= m separated by at least one empty cell.  Rows
    shorter than m admit exactly one filling (all empty), hence the seed.
    """
    # Fix: the seed list and loop start previously referenced an undefined
    # `min_block_length`; both now use the parameter.
    fill_count_functions = [1] * _UpperCAmelCase
    for n in count(_UpperCAmelCase):
        fill_count_functions.append(1)
        for block_length in range(_UpperCAmelCase, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_00_00_00:
            break
    return n


if __name__ == "__main__":
    # Fix: the guard called undefined `solution`; call the function above.
    print(f"{A_() = }")
671
1
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class __lowercase(UpperCAmelCase_, unittest.TestCase):
    """Unit tests for `TransfoXLTokenizer` (word-level vocabulary with
    moses-style punctuation splitting).

    NOTE(review): every method below shares the single name
    `_SCREAMING_SNAKE_CASE`, so only the last definition survives on the
    class, and several names (`UpperCAmelCase_`, `vocab_tokens`,
    `input_text`, `tokenizer`, `lowerCAmelCase__`, ...) are unresolved in
    this file — presumably renaming artifacts; confirm every one against
    the upstream test before relying on this module.
    """

    # Tokenizer class under test plus mixin feature flags (presumably
    # `tokenizer_class`, `test_rust_tokenizer`, `test_seq2seq` upstream —
    # TODO confirm).
    _UpperCAmelCase: str = TransfoXLTokenizer
    _UpperCAmelCase: int = False
    _UpperCAmelCase: bool = False  # annotation fixed: `Union` is not imported here

    def _SCREAMING_SNAKE_CASE(self):
        # setUp: write a tiny vocabulary file into the mixin's tmp dir.
        super().setUp()
        SCREAMING_SNAKE_CASE_: List[Any] = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        SCREAMING_SNAKE_CASE_: Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        # NOTE(review): the write below reads `vocab_tokens` and
        # `self.vocab_file`, neither of which is assigned above (both
        # assignments landed in the throwaway local name) — confirm the
        # intended targets against the upstream test.
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def _SCREAMING_SNAKE_CASE(self, **lowerCAmelCase__):
        # get_tokenizer: reload the tokenizer from the tmp dir.
        # NOTE(review): the `True` below is assigned to a throwaway name;
        # upstream presumably set `kwargs["lower_case"] = True` — confirm.
        SCREAMING_SNAKE_CASE_: List[Any] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE(self, lowerCAmelCase__):
        # get_input_output_texts: raw/normalized pair for round-trip checks.
        SCREAMING_SNAKE_CASE_: Tuple = "<unk> UNwanted , running"
        SCREAMING_SNAKE_CASE_: Any = "<unk> unwanted, running"
        # NOTE(review): `input_text`/`output_text` are never assigned here.
        return input_text, output_text

    def _SCREAMING_SNAKE_CASE(self):
        # Full tokenizer: lower-cased tokenization against the tiny vocab.
        # NOTE(review): `lowerCAmelCase__` is not a parameter of this method
        # and `tokenizer` is never bound — renaming artifacts.
        SCREAMING_SNAKE_CASE_: List[str] = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Tuple = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(lowerCAmelCase__, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__), [0, 4, 8, 7])

    def _SCREAMING_SNAKE_CASE(self):
        # Whitespace normalization with lower-casing enabled.
        SCREAMING_SNAKE_CASE_: Optional[Any] = TransfoXLTokenizer(lower_case=lowerCAmelCase__)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def _SCREAMING_SNAKE_CASE(self):
        # Same input with lower-casing disabled keeps the original casing.
        SCREAMING_SNAKE_CASE_: str = TransfoXLTokenizer(lower_case=lowerCAmelCase__)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def _SCREAMING_SNAKE_CASE(self):
        # Moses-style pre-tokenization: brackets, hyphens (@-@), thousands
        # separators (@,@) and decimal points (@.@), plus detokenization.
        SCREAMING_SNAKE_CASE_: Any = TransfoXLTokenizer(lower_case=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[Any] = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        SCREAMING_SNAKE_CASE_: str = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(lowerCAmelCase__), lowerCAmelCase__)
        self.assertEqual(tokenizer.convert_tokens_to_string(lowerCAmelCase__), lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE(self):
        # move_added_token: keeps vocab size and relocates the token id.
        SCREAMING_SNAKE_CASE_: Any = self.get_tokenizer()
        SCREAMING_SNAKE_CASE_: Any = len(lowerCAmelCase__)
        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(lowerCAmelCase__), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
671
def A_(_UpperCAmelCase):
    """Return the largest number obtainable by removing exactly one digit
    from the decimal representation of ``abs(_UpperCAmelCase)``.

    Args:
        _UpperCAmelCase: an integer (the sign is ignored).

    Raises:
        TypeError: if the input is not an int.
    """
    if not isinstance(_UpperCAmelCase, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(_UpperCAmelCase))
    # Fix: build one candidate per digit position from the digit string
    # (the original called `list()` on the int itself), then drop that
    # position's digit from its candidate, and join the candidates — not
    # the original argument — when comparing.
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(transposition)) for transposition in num_transpositions
    )


if __name__ == "__main__":
    __import__("doctest").testmod()
671
1
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def A_(_UpperCAmelCase=True, *args, **kwargs):
    """Drop-in ``tqdm`` wrapper that shows the bar only on the main process.

    Args:
        _UpperCAmelCase: ``main_process_only`` — when True (default), the bar
            is disabled on every process except local rank 0.
        *args, **kwargs: forwarded to ``tqdm.auto.tqdm``.

    Raises:
        ImportError: if ``tqdm`` is not installed.
    """
    # Fix: the original signature reused one name for the flag, *args and
    # **kwargs (a SyntaxError); positional/keyword pass-through now has its
    # own names.
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if _UpperCAmelCase:
        # Fix: disable on *non*-main processes; the previous `== 0` hid the
        # bar exactly on the main process, inverting the intent.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
671
from __future__ import annotations from collections.abc import Iterator from typing import Any class __lowercase : """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : Any): SCREAMING_SNAKE_CASE_: Any = data SCREAMING_SNAKE_CASE_: Node | None = None class __lowercase : """simple docstring""" def __init__( self : int): SCREAMING_SNAKE_CASE_: Dict = None SCREAMING_SNAKE_CASE_: str = None def __iter__( self : List[str]): SCREAMING_SNAKE_CASE_: Tuple = self.head while self.head: yield node.data SCREAMING_SNAKE_CASE_: List[str] = node.next if node == self.head: break def __len__( self : Dict): return sum(1 for _ in self) def __repr__( self : Dict): return "->".join(str(lowerCAmelCase__) for item in iter(self)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any): self.insert_nth(len(self) , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any): self.insert_nth(0 , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Any): if index < 0 or index > len(self): raise IndexError("list index out of range.") SCREAMING_SNAKE_CASE_: Any = Node(lowerCAmelCase__) if self.head is None: SCREAMING_SNAKE_CASE_: str = new_node # first node points itself SCREAMING_SNAKE_CASE_: Optional[Any] = new_node elif index == 0: # insert at head SCREAMING_SNAKE_CASE_: Optional[Any] = self.head SCREAMING_SNAKE_CASE_: str = new_node else: SCREAMING_SNAKE_CASE_: int = self.head for _ in range(index - 1): SCREAMING_SNAKE_CASE_: Optional[Any] = temp.next SCREAMING_SNAKE_CASE_: List[str] = temp.next SCREAMING_SNAKE_CASE_: int = new_node if index == len(self) - 1: # insert at tail SCREAMING_SNAKE_CASE_: Any = new_node def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return self.delete_nth(0) def _SCREAMING_SNAKE_CASE ( self : Any): return self.delete_nth(len(self) - 1) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : int = 0): if not 0 <= index < 
len(self): raise IndexError("list index out of range.") SCREAMING_SNAKE_CASE_: Optional[Any] = self.head if self.head == self.tail: # just one node SCREAMING_SNAKE_CASE_: List[str] = None elif index == 0: # delete head node SCREAMING_SNAKE_CASE_: int = self.tail.next.next SCREAMING_SNAKE_CASE_: Tuple = self.head.next else: SCREAMING_SNAKE_CASE_: Optional[int] = self.head for _ in range(index - 1): SCREAMING_SNAKE_CASE_: Any = temp.next SCREAMING_SNAKE_CASE_: Optional[Any] = temp.next SCREAMING_SNAKE_CASE_: int = temp.next.next if index == len(self) - 1: # delete at tail SCREAMING_SNAKE_CASE_: int = temp return delete_node.data def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return len(self) == 0 def A_ ( ): SCREAMING_SNAKE_CASE_: Dict = CircularLinkedList() assert len(_UpperCAmelCase ) == 0 assert circular_linked_list.is_empty() is True assert str(_UpperCAmelCase ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(_UpperCAmelCase ) == i circular_linked_list.insert_nth(_UpperCAmelCase , i + 1 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 6 ) ) circular_linked_list.insert_tail(6 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 7 ) ) circular_linked_list.insert_head(0 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(0 , 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(_UpperCAmelCase ) == 
"->".join(str(_UpperCAmelCase ) for i in range(1 , 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 , 3 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 6 ) ) assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
671
1
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


# NOTE(review): in the obfuscated source every method carried the same name
# (each ``def`` silently shadowed the previous one), both keyword parameters of
# several methods were literally named identically (a SyntaxError), and results
# were bound to throwaway locals instead of the names later statements read.
# Names below are restored from the call sites visible in the block itself
# (``self.get_scheduler_config``, ``self.check_over_configs``,
# ``self.check_over_forward``, ``self.full_loop``, ``self.scheduler_classes``,
# ``self.forward_default_kwargs``); the ``test_*`` method names and the
# ``scheduler.ets`` attribute are conventional reconstructions -- confirm
# against the upstream diffusers scheduler tests.
class PNDMSchedulerTest(SchedulerCommonTest):
    """Unit tests for the PNDM scheduler (PRK warm-up + PLMS stepping)."""

    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Return a default PNDM config dict, overridable via ``kwargs``."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload a scheduler built from ``config`` and check both copies
        produce identical PRK and PLMS steps."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Covered by the save/reload round trips in check_over_configs /
        # check_over_forward above.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Same save/reload round trip as ``check_over_configs`` but varying
        the forward kwargs instead of the config."""
        kwargs = dict(self.forward_default_kwargs)
        # NOTE(review): this merge was lost in the obfuscated source; without
        # it ``forward_kwargs`` would be silently dropped -- confirm upstream.
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run a complete 10-step PRK + PLMS denoising loop, return the sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's
        # with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only
            # need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        # NOTE(review): the expected exception type was lost in the obfuscated
        # source; ValueError matches the upstream diffusers test -- confirm.
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
671
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Project Euler 174: count hole-lamina tile totals.

    Counts the values ``t <= t_limit`` such that the number of distinct
    square "laminae" (a square outer border with a centred square hole)
    that can be built from exactly ``t`` tiles is between 1 and ``n_limit``.

    Bug fixes versus the obfuscated original:
    * the tally dict and the hole lower bound were bound to throwaway
      names, so ``count`` / ``hole_width_lower_bound`` raised NameError;
    * the default factory was ``defaultdict(t_limit)`` instead of
      ``defaultdict(int)``;
    * ``n_limit`` was accepted but ignored (``10`` was hard-coded in the
      final sum) -- behaviour is unchanged for the default arguments.
    (The public name is restored to ``solution``, which is what the
    ``__main__`` guard in this snippet already calls.)
    """
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # Smallest hole that keeps the tile count within t_limit.
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # Hole and outer widths must share parity for the hole to be centred.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
671
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Bug fix: the obfuscated source bound this dict (and every optional
# section's list) to throwaway names, so the _LazyModule call below received
# an undefined ``_import_structure`` and the optional entries were discarded.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]

if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # NOTE(review): the obfuscated source assigned the lazy module to a
    # throwaway name; the standard transformers pattern installs it into
    # sys.modules -- confirm against upstream.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Bug fix: the obfuscated source bound this dict (and every optional
# section's list) to throwaway names, so the _LazyModule call below received
# an undefined ``_import_structure`` and the optional entries were discarded.
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    # NOTE(review): restored from the standard transformers lazy-init
    # pattern; the obfuscated source assigned to a throwaway name.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
1
def perfect_cube(n: int) -> bool:
    """Return True if ``n`` is a perfect cube (the cube of an integer).

    Bug fix: the original compared a floating-point cube root directly
    (``(n ** (1/3)) ** 3 == n``), which fails for most true cubes —
    e.g. ``27 ** (1/3)`` is ``3.0000000000000004`` so ``perfect_cube(27)``
    returned False.  Rounding the root to the nearest integer and cubing
    it exactly makes the check reliable.  Taking ``abs`` first also
    generalizes the function to negative cubes, which previously produced
    a complex intermediate and crashed.

    >>> perfect_cube(27)
    True
    >>> perfect_cube(4)
    False
    >>> perfect_cube(-27)
    True
    """
    root = round(abs(n) ** (1 / 3))
    return root * root * root == abs(n)


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
671
# Undirected demo graph used by the __main__ guard below.  (The obfuscated
# source bound it to a throwaway name while the guard read ``demo_graph``.)
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Return the shortest path from ``start`` to ``goal`` as a list of
    nodes, ``[start]`` if they are equal, or ``[]`` if no path exists.

    Bug fixes versus the obfuscated original: both functions in this
    snippet were named ``A_`` (the second silently shadowed the first) and
    every local was bound to a throwaway name while later statements read
    ``explored``/``queue``/``path`` etc., raising NameError.  The public
    names are restored to the ones the ``__main__`` guard already calls.
    """
    explored = set()
    # Queue of partial paths still to be extended.
    queue = [[start]]

    if start == goal:
        return [start]

    while queue:
        path = queue.pop(0)
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # Extend the current path by each unexplored neighbour.
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                if neighbour == goal:
                    return new_path
            explored.add(node)

    # No path between the two nodes.
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on the shortest path from ``start`` to
    ``target``, 0 if they are equal, or -1 if either node is missing or
    unreachable."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0

    queue = [start]
    visited = {start}
    # Distances from ``start``; -1 marks "target not reached yet".
    dist = {start: 0, target: -1}

    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1

    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
671
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): module-level names were obfuscated to throwaway identifiers;
# restored to the conventional Hugging Face names (nothing in this block
# referenced the old ones).
VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    """Configuration for a ViT MAE model.

    Stores the encoder hyper-parameters (hidden size, depth, heads, ...),
    the image/patch geometry, and the MAE decoder hyper-parameters plus the
    masking ratio used during pre-training.  Defaults match the values in
    the obfuscated original (facebook/vit-mae-base).

    Bug fixes versus the obfuscated original: every ``__init__`` parameter
    carried the same name (a SyntaxError), the base class was an undefined
    identifier, and each value was bound to a local variable instead of an
    instance attribute — so the config stored nothing.  Parameter names are
    recovered from the right-hand sides of the original assignments.
    """

    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Must be instance attributes for serialization to work.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
671
from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class __lowercase ( UpperCAmelCase_ ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : float): return 0.0 def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[str] = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) SCREAMING_SNAKE_CASE_: Dict = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = 5_12 SCREAMING_SNAKE_CASE_: str = [1] + [0] * (size - 1) SCREAMING_SNAKE_CASE_: Dict = [filter_type.process(_UpperCAmelCase ) for item in inputs] SCREAMING_SNAKE_CASE_: Optional[Any] = [0] * (samplerate - size) # zero-padding outputs += filler SCREAMING_SNAKE_CASE_: Tuple = np.abs(np.fft.fft(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE_: Optional[Any] = 20 * np.logaa(_UpperCAmelCase ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel("Frequency (Hz)" ) plt.xscale("log" ) # Display within reasonable bounds SCREAMING_SNAKE_CASE_: Any = get_bounds(_UpperCAmelCase , _UpperCAmelCase ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel("Gain (dB)" ) plt.plot(_UpperCAmelCase ) plt.show() def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = 5_12 SCREAMING_SNAKE_CASE_: Union[str, Any] = [1] + [0] * (size - 1) SCREAMING_SNAKE_CASE_: Dict = [filter_type.process(_UpperCAmelCase ) for item in inputs] SCREAMING_SNAKE_CASE_: int = [0] * (samplerate - size) # zero-padding outputs += filler SCREAMING_SNAKE_CASE_: Any = np.angle(np.fft.fft(_UpperCAmelCase ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel("Frequency (Hz)" ) plt.xscale("log" ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel("Phase shift (Radians)" ) 
plt.plot(np.unwrap(_UpperCAmelCase , -2 * pi ) ) plt.show()
671
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Bug fix: the obfuscated source bound this dict (and every optional
# section's list) to throwaway names, so the _LazyModule call below received
# an undefined ``_import_structure`` and the optional entries were discarded.
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # NOTE(review): unlike the sibling inits in this file, the original call
    # here passed no ``module_spec`` -- preserved as-is.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
671
from __future__ import annotations from math import ceil, floor, sqrt def A_ ( _UpperCAmelCase = 2_00_00_00 ): SCREAMING_SNAKE_CASE_: list[int] = [0] SCREAMING_SNAKE_CASE_: int for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target SCREAMING_SNAKE_CASE_: int = 0 # the area corresponding to the grid that gives the product closest to target SCREAMING_SNAKE_CASE_: int = 0 # an estimate of b, using the quadratic formula SCREAMING_SNAKE_CASE_: float # the largest integer less than b_estimate SCREAMING_SNAKE_CASE_: int # the largest integer less than b_estimate SCREAMING_SNAKE_CASE_: int # the triangle number corresponding to b_floor SCREAMING_SNAKE_CASE_: int # the triangle number corresponding to b_ceil SCREAMING_SNAKE_CASE_: int for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): SCREAMING_SNAKE_CASE_: List[Any] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 SCREAMING_SNAKE_CASE_: Any = floor(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] = ceil(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Any = triangle_numbers[b_floor] SCREAMING_SNAKE_CASE_: List[Any] = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): SCREAMING_SNAKE_CASE_: int = triangle_b_first_guess * triangle_a SCREAMING_SNAKE_CASE_: int = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): SCREAMING_SNAKE_CASE_: Optional[Any] = triangle_b_second_guess * triangle_a SCREAMING_SNAKE_CASE_: Tuple = idx_a * b_ceil return area if __name__ == "__main__": print(f'''{solution() = }''')
671
1
import argparse


# Path of the versioned-docs selector script in the built documentation.
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version: str, js_file: str = CUSTOM_JS_FILE) -> None:
    """Record a new release in the docs' custom.js.

    Rewrites the ``const stableVersion = ...`` line to ``v<version>`` and
    appends a ``"v<version>": "v<version>"`` entry at the end of the
    ``const versionMapping = { ... }`` block.

    Bug fix versus the obfuscated original: the read lines, the running
    index and the rewritten line were all bound to throwaway names, so
    every later reference (``lines``, ``index``) raised NameError.  The
    file path is generalized to a ``js_file`` parameter defaulting to the
    original constant, which is backward compatible and makes the function
    testable.  (The exact whitespace of the inserted mapping entry is taken
    verbatim from the source — confirm against the real custom.js layout.)
    """
    with open(js_file, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f"const stableVersion = \"v{version}\"\n"

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f" \"v{version}\": \"v{version}\",\n"

    with open(js_file, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
671
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Import structure handed to `_LazyModule`: module name -> public names.
# Fixed: the obfuscated source assigned each optional list to a throwaway name
# (never registering it) and then passed an undefined `_import_structure` to
# `_LazyModule`, which would raise a NameError at import time.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]


if TYPE_CHECKING:
    # Real imports for static type checkers; mirrors `_import_structure` above.
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Fixed: the lazy module was assigned to a throwaway name instead of being
    # installed in `sys.modules`, which defeats lazy importing entirely.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
1
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    """Holds the image-processor settings and sizes shared by the tests below.

    Fixed: the obfuscated source named this class `__lowercase` (immediately
    shadowed by the test class of the same name) while `setUp` referenced
    `EfficientFormerImageProcessorTester`; its `__init__` also had duplicate
    parameter names (a SyntaxError) and never stored any attribute on `self`.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Kwargs to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class __lowercase(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests the ViT image processor on PIL, numpy and torch inputs.

    Fixed: the base class was the undefined name `UpperCAmelCase_` (the
    imported `ImageProcessingSavingTestMixin` was clearly intended) and every
    method shared the name `_SCREAMING_SNAKE_CASE`, so only the last one
    survived class creation.
    """

    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
671
"""Convert a HF Diffusers checkpoint directory into a single original
Stable Diffusion checkpoint (UNet + VAE + text encoder).

Fixed relative to the obfuscated source: the conversion maps were assigned to
a single repeatedly-overwritten throwaway name while the code referenced
`unet_conversion_map` etc.; all five converters were named `A_` (each
shadowing the previous) while `__main__` called `convert_unet_state_dict`
and friends; the regex-substitution lambdas referenced an undefined `m`; and
the qkv capture-dict item assignments had collapsed into plain assignments.
"""
import argparse
import os.path as osp
import re

import torch

# =================#
# UNet Conversion #
# =================#

unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def convert_unet_state_dict(unet_state_dict):
    """Rename every HF Diffusers UNet key to its SD equivalent.

    buyer beware: this is a *brittle* function, and correct output requires
    that all of these pieces interact in the exact order in which they are
    arranged: direct key map first, then resnet sub-parts, then layer prefixes.
    """
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# ================#
# VAE Conversion #
# ================#

vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]


def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights (append two 1x1 spatial dims)
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    """Rename HF Diffusers VAE keys to SD names and reshape mid-attn weights."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}


def convert_text_enc_state_dict_vaa(text_enc_dict):
    """Convert a v2.x (OpenCLIP) text-encoder state dict to SD layout.

    Separate q/k/v projection weights and biases are gathered per layer and
    concatenated into the single `in_proj_weight` / `in_proj_bias` tensors
    that the SD checkpoint format expects.
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            # single character 'q'/'k'/'v' at this fixed offset from the end
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    # v1.x (CLIP) text encoders already use the SD key layout.
    return text_enc_dict


# Backward-compatible alias: the obfuscated source exposed this name (the
# last of several shadowing `A_` definitions was this identity converter).
A_ = convert_text_enc_state_dict


if __name__ == "__main__":
    # NOTE: safetensors is only needed when actually running the CLI, so it is
    # imported here rather than at module level; this keeps the converters
    # importable without the optional dependency installed.
    from safetensors.torch import load_file, save_file

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )
    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
671
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Import structure handed to `_LazyModule`: module name -> public names.
# Fixed: the obfuscated source assigned each optional list to a throwaway name
# (never registering it) and then passed an undefined `_import_structure` to
# `_LazyModule`, which would raise a NameError at import time.
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]


if TYPE_CHECKING:
    # Real imports for static type checkers; mirrors `_import_structure` above.
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    # Fixed: the lazy module was assigned to a throwaway name instead of being
    # installed in `sys.modules`, which defeats lazy importing entirely.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class __lowercase(PretrainedConfig):
    """Configuration class for XLM-ProphetNet models.

    Fixed relative to the obfuscated source: `__init__` declared every
    parameter with the same name (a SyntaxError) and stored nothing on
    `self`; the three class attributes all collapsed into one duplicate
    name; and the property setter decorator referenced the undefined name
    `num_hidden_layers` while the property itself had been renamed.
    """

    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 3_0522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        # Combined depth of the encoder and decoder stacks.
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`.")
671
1
from collections import defaultdict
from math import gcd


def A_(_UpperCAmelCase = 1_50_00_00):
    """Project Euler 75: count wire lengths that form exactly one right triangle.

    Every primitive Pythagorean triple arises from Euclid's formula with
    coprime ``m > n`` of opposite parity, giving perimeter ``2*m*(m + n)``.
    We tally every multiple of every primitive perimeter up to the limit,
    then count the perimeters hit exactly once.

    Args:
        _UpperCAmelCase: maximum wire length (perimeter) to consider.

    Returns:
        Number of lengths <= the limit formed by exactly one integer-sided
        right triangle.
    """
    limit = _UpperCAmelCase
    # Fixed: the obfuscated source passed the integer limit as the default
    # factory (`defaultdict(_UpperCAmelCase)`), which raises TypeError on the
    # first missing-key access; `int` is the intended factory.
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    # The smallest primitive perimeter for a given m is 2*m*(m+1) (n = 1), so
    # stop once even that exceeds the limit.
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        # n runs over values of opposite parity to m; skip non-coprime pairs.
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            # Count the primitive triple and all of its integer multiples.
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    # Fixed: the obfuscated source called an undefined `solution()` here.
    print(f"{A_() = }")
671
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


lowerCAmelCase : Dict = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    """Return the (n, m) matrix of pairwise squared Euclidean distances
    between the rows of ``a`` (n, d) and the rows of ``b`` (m, d).

    Uses the identity ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2 so the whole
    computation is a single matmul plus broadcasts.
    """
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    """Map every RGB pixel in ``x`` to the index of its nearest cluster color.

    ``x`` is flattened to (num_pixels, 3); returns a 1-D array of cluster
    indices of length num_pixels.
    """
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


# Backward-compatible alias: the previous (name-mangled) revision exposed the
# quantizer under the module-level name A_.
A_ = color_quantize


class __lowercase ( BaseImageProcessor ):
    """Image processor in the ImageGPT style: optionally resizes, normalizes
    pixel values to [-1, 1], and color-quantizes images against a fixed
    palette of RGB clusters, producing flat ``input_ids`` sequences.

    NOTE(review): the previous revision could not run — duplicate parameter
    names (SyntaxError), undefined base class, helper functions bound to the
    wrong names, and instance attributes that were never assigned. This body
    restores a coherent implementation with the same observable contract.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        # Keep clusters as an ndarray so distance math needs no conversion later.
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize ``image`` to ``size["height"] x size["width"]``.

        Raises:
            ValueError: if ``size`` lacks either the "height" or "width" key.
        """
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"Size dictionary must contain both height and width keys. Got {size.keys()}")
        # Delegates to the module-level `resize` from image_transforms.
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """Map pixel values from [0, 255] to [-1, 1] (scale by 1/127.5, shift by -1)."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Prepare one image or a batch for the model.

        Every argument defaults to the value configured on the instance.
        When ``do_color_quantize`` is on, the result is a batch of flat
        cluster-index sequences under the key ``input_ids``; otherwise the
        (possibly resized/normalized) pixel arrays are returned.

        Raises:
            ValueError: on invalid image types, on resize without size/resample,
                or on quantization without a cluster palette.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        # Fixed: converting with np.array(None) used to produce a non-None
        # object array that silently defeated the `clusters is None` guard below.
        clusters = np.array(clusters) if clusters is not None else None

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        # Fixed precedence: the old `a and b or c` form raised whenever
        # resample was None even with do_resize disabled.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent
            # behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
671
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import table: submodule name -> public names it defines. Previous
# revision bound every piece of this table to a throwaway name, so the
# `_import_structure` referenced at the bottom was undefined.
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

# Register the tokenizer only when sentencepiece is installed; a missing
# optional dependency is simply skipped, not an error.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

# Modeling classes require torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    # Fixed: the lazy module must replace this module in sys.modules; the
    # previous revision assigned it to an unused variable, so attribute
    # access never triggered lazy loading.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
671
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer lowerCAmelCase : Optional[int] = logging.get_logger(__name__) lowerCAmelCase : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Tuple = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : Union[str, Any] = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : List[str] = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), 
"""facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase : int = { """facebook/dpr-ctx_encoder-single-nq-base""": 512, """facebook/dpr-ctx_encoder-multiset-base""": 512, } lowerCAmelCase : int = { """facebook/dpr-question_encoder-single-nq-base""": 512, """facebook/dpr-question_encoder-multiset-base""": 512, } lowerCAmelCase : List[Any] = { """facebook/dpr-reader-single-nq-base""": 512, """facebook/dpr-reader-multiset-base""": 512, } lowerCAmelCase : Optional[int] = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } lowerCAmelCase : Optional[int] = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } lowerCAmelCase : List[str] = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class __lowercase ( UpperCAmelCase_ ): """simple docstring""" _UpperCAmelCase : Any = VOCAB_FILES_NAMES _UpperCAmelCase : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : List[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : List[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class __lowercase ( UpperCAmelCase_ ): """simple docstring""" _UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES _UpperCAmelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : Any = 
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : str = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION lowerCAmelCase : List[Any] = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) lowerCAmelCase : Optional[Any] = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) lowerCAmelCase : int = R""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: ``` [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> ``` Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. 
- `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Returns: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(UpperCAmelCase_ ) class __lowercase : """simple docstring""" def __call__( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Union[bool, str] = False , lowerCAmelCase__ : Union[bool, str] = False , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Optional[bool] = None , **lowerCAmelCase__ : Tuple , ): if titles is None and texts is None: return super().__call__( lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , ) elif titles is None or texts is None: SCREAMING_SNAKE_CASE_: List[str] = titles if texts is None else texts return super().__call__( lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_: Optional[int] = titles if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [titles] SCREAMING_SNAKE_CASE_: int = texts if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [texts] SCREAMING_SNAKE_CASE_: str = len(lowerCAmelCase__) 
SCREAMING_SNAKE_CASE_: Tuple = questions if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [questions] * n_passages if len(lowerCAmelCase__) != len(lowerCAmelCase__): raise ValueError( F"There should be as many titles than texts but got {len(lowerCAmelCase__)} titles and {len(lowerCAmelCase__)} texts.") SCREAMING_SNAKE_CASE_: Optional[Any] = super().__call__(lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__)["input_ids"] SCREAMING_SNAKE_CASE_: Union[str, Any] = super().__call__(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__)["input_ids"] SCREAMING_SNAKE_CASE_: int = { "input_ids": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCAmelCase__ , lowerCAmelCase__) ] } if return_attention_mask is not False: SCREAMING_SNAKE_CASE_: Dict = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids]) SCREAMING_SNAKE_CASE_: int = attention_mask return self.pad(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : BatchEncoding , lowerCAmelCase__ : DPRReaderOutput , lowerCAmelCase__ : int = 16 , lowerCAmelCase__ : int = 64 , lowerCAmelCase__ : int = 4 , ): SCREAMING_SNAKE_CASE_: int = reader_input["input_ids"] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = reader_output[:3] SCREAMING_SNAKE_CASE_: Tuple = len(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = sorted(range(lowerCAmelCase__) , reverse=lowerCAmelCase__ , key=relevance_logits.__getitem__) SCREAMING_SNAKE_CASE_: List[DPRReaderOutput] = [] for doc_id in sorted_docs: SCREAMING_SNAKE_CASE_: Optional[int] = 
list(input_ids[doc_id]) # assuming question & title information is at the beginning of the sequence SCREAMING_SNAKE_CASE_: str = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: SCREAMING_SNAKE_CASE_: List[Any] = sequence_ids.index(self.pad_token_id) else: SCREAMING_SNAKE_CASE_: Dict = len(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase__ , top_spans=lowerCAmelCase__ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase__ , start_index=lowerCAmelCase__ , end_index=lowerCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , )) if len(lowerCAmelCase__) >= num_spans: break return nbest_spans_predictions[:num_spans] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , ): SCREAMING_SNAKE_CASE_: Any = [] for start_index, start_score in enumerate(lowerCAmelCase__): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]): scores.append(((start_index, start_index + answer_length), start_score + end_score)) SCREAMING_SNAKE_CASE_: Union[str, Any] = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__: x[1] , reverse=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(F"Wrong span indices: [{start_index}:{end_index}]") SCREAMING_SNAKE_CASE_: int = end_index - start_index + 1 if length > max_answer_length: raise 
ValueError(F"Span is too long: {length} > {max_answer_length}") if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals): continue chosen_span_intervals.append((start_index, end_index)) if len(lowerCAmelCase__) == top_spans: break return chosen_span_intervals @add_end_docstrings(UpperCAmelCase_ ) class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ ): """simple docstring""" _UpperCAmelCase : Any = VOCAB_FILES_NAMES _UpperCAmelCase : Optional[Any] = READER_PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : int = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION _UpperCAmelCase : str = ['''input_ids''', '''attention_mask''']
671
1
import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("""0.8.3"""): raise Exception("""requires gluonnlp == 0.8.3""") if version.parse(mx.__version__) != version.parse("""1.5.0"""): raise Exception("""requires mxnet == 1.5.0""") logging.set_verbosity_info() lowerCAmelCase : List[str] = logging.get_logger(__name__) lowerCAmelCase : Dict = """The Nymphenburg Palace is a beautiful palace in Munich!""" def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Any = { "attention_cell": "multi_head", "num_layers": 4, "units": 10_24, "hidden_size": 7_68, "max_length": 5_12, "num_heads": 8, "scaled": True, "dropout": 0.1, "use_residual": True, "embed_size": 10_24, "embed_dropout": 0.1, "word_embed": None, "layer_norm_eps": 1e-5, "token_type_vocab_size": 2, } SCREAMING_SNAKE_CASE_: Union[str, Any] = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py SCREAMING_SNAKE_CASE_: List[str] = BERTEncoder( attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=_UpperCAmelCase , output_all_encodings=_UpperCAmelCase 
, use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , _UpperCAmelCase ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later SCREAMING_SNAKE_CASE_: List[str] = "openwebtext_ccnews_stories_books_cased" # Specify download folder to Gluonnlp's vocab SCREAMING_SNAKE_CASE_: str = os.path.join(get_home_dir() , "models" ) SCREAMING_SNAKE_CASE_: List[Any] = _load_vocab(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , cls=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[Any] = nlp.model.BERTModel( _UpperCAmelCase , len(_UpperCAmelCase ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=_UpperCAmelCase , use_token_type_embed=_UpperCAmelCase , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=_UpperCAmelCase , use_decoder=_UpperCAmelCase , ) original_bort.load_parameters(_UpperCAmelCase , cast_dtype=_UpperCAmelCase , ignore_extra=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Optional[Any] = original_bort._collect_params_with_prefix() # Build our config 🤗 SCREAMING_SNAKE_CASE_: Any = { "architectures": ["BertForMaskedLM"], "attention_probs_dropout_prob": predefined_args["dropout"], "hidden_act": "gelu", "hidden_dropout_prob": predefined_args["dropout"], "hidden_size": predefined_args["embed_size"], "initializer_range": 0.0_2, "intermediate_size": predefined_args["hidden_size"], "layer_norm_eps": predefined_args["layer_norm_eps"], "max_position_embeddings": predefined_args["max_length"], "model_type": "bort", "num_attention_heads": predefined_args["num_heads"], "num_hidden_layers": predefined_args["num_layers"], "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa "vocab_size": len(_UpperCAmelCase ), } 
SCREAMING_SNAKE_CASE_: List[Any] = BertConfig.from_dict(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Tuple = BertForMaskedLM(_UpperCAmelCase ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | 
`encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(_UpperCAmelCase ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(_UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = hf_param.shape SCREAMING_SNAKE_CASE_: int = to_torch(params[gluon_param] ) SCREAMING_SNAKE_CASE_: Dict = gluon_param.shape assert ( shape_hf == shape_gluon ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers" return gluon_param SCREAMING_SNAKE_CASE_: Dict = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" ) SCREAMING_SNAKE_CASE_: int = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" ) SCREAMING_SNAKE_CASE_: int = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" ) SCREAMING_SNAKE_CASE_: Optional[Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) SCREAMING_SNAKE_CASE_: Any = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): SCREAMING_SNAKE_CASE_: BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention SCREAMING_SNAKE_CASE_: BertSelfAttention = layer.attention.self SCREAMING_SNAKE_CASE_: Optional[Any] = check_and_map_params( self_attn.key.bias.data , 
f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" ) SCREAMING_SNAKE_CASE_: Tuple = check_and_map_params( self_attn.key.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" ) SCREAMING_SNAKE_CASE_: List[str] = check_and_map_params( self_attn.query.bias.data , f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" ) SCREAMING_SNAKE_CASE_: List[str] = check_and_map_params( self_attn.query.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" ) SCREAMING_SNAKE_CASE_: Any = check_and_map_params( self_attn.value.bias.data , f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" ) SCREAMING_SNAKE_CASE_: List[Any] = check_and_map_params( self_attn.value.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" ) # self attention output SCREAMING_SNAKE_CASE_: BertSelfOutput = layer.attention.output SCREAMING_SNAKE_CASE_: Optional[Any] = check_and_map_params( self_output.dense.bias , f"encoder.transformer_cells.{i}.proj.bias" ) SCREAMING_SNAKE_CASE_: List[str] = check_and_map_params( self_output.dense.weight , f"encoder.transformer_cells.{i}.proj.weight" ) SCREAMING_SNAKE_CASE_: Any = check_and_map_params( self_output.LayerNorm.bias , f"encoder.transformer_cells.{i}.layer_norm.beta" ) SCREAMING_SNAKE_CASE_: int = check_and_map_params( self_output.LayerNorm.weight , f"encoder.transformer_cells.{i}.layer_norm.gamma" ) # intermediate SCREAMING_SNAKE_CASE_: BertIntermediate = layer.intermediate SCREAMING_SNAKE_CASE_: Union[str, Any] = check_and_map_params( intermediate.dense.bias , f"encoder.transformer_cells.{i}.ffn.ffn_1.bias" ) SCREAMING_SNAKE_CASE_: Tuple = check_and_map_params( intermediate.dense.weight , f"encoder.transformer_cells.{i}.ffn.ffn_1.weight" ) # output SCREAMING_SNAKE_CASE_: BertOutput = layer.output SCREAMING_SNAKE_CASE_: Optional[int] = check_and_map_params( bert_output.dense.bias , f"encoder.transformer_cells.{i}.ffn.ffn_2.bias" ) SCREAMING_SNAKE_CASE_: int 
= check_and_map_params( bert_output.dense.weight , f"encoder.transformer_cells.{i}.ffn.ffn_2.weight" ) SCREAMING_SNAKE_CASE_: str = check_and_map_params( bert_output.LayerNorm.bias , f"encoder.transformer_cells.{i}.ffn.layer_norm.beta" ) SCREAMING_SNAKE_CASE_: int = check_and_map_params( bert_output.LayerNorm.weight , f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models SCREAMING_SNAKE_CASE_: Tuple = RobertaTokenizer.from_pretrained("roberta-base" ) SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.encode_plus(_UpperCAmelCase )["input_ids"] # Get gluon output SCREAMING_SNAKE_CASE_: Dict = mx.nd.array([input_ids] ) SCREAMING_SNAKE_CASE_: Optional[Any] = original_bort(inputs=_UpperCAmelCase , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Union[str, Any] = BertModel.from_pretrained(_UpperCAmelCase ) hf_bort_model.eval() SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.encode_plus(_UpperCAmelCase , return_tensors="pt" ) SCREAMING_SNAKE_CASE_: Dict = hf_bort_model(**_UpperCAmelCase )[0] SCREAMING_SNAKE_CASE_: List[Any] = output_gluon[0].asnumpy() SCREAMING_SNAKE_CASE_: List[Any] = output_hf[0].detach().numpy() SCREAMING_SNAKE_CASE_: Optional[Any] = np.max(np.abs(hf_layer - gluon_layer ) ).item() SCREAMING_SNAKE_CASE_: List[str] = np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) if success: print("✔️ Both model do output the same tensors" ) else: print("❌ Both model do **NOT** output the same tensors" ) print("Absolute difference is:" , _UpperCAmelCase ) if __name__ == "__main__": lowerCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, 
help="""Path to the output PyTorch model.""" ) lowerCAmelCase : List[str] = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
671
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    """Tokenizer tests for DistilBERT.

    DistilBERT reuses BERT's vocabulary and tokenization rules, so the whole
    BERT tokenization test suite is inherited; only DistilBERT-specific
    checks are added here.
    """

    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        """Check that ``build_inputs_with_special_tokens`` wraps single
        sequences as ``[CLS] x [SEP]`` and pairs as ``[CLS] x [SEP] y [SEP]``.
        """
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        # Encode without special tokens so we can check their placement below.
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
671
1
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    """Configuration for BEiT models.

    Stores the hyper-parameters of a BEiT backbone plus the optional
    semantic-segmentation decode/auxiliary heads.  Instantiating with no
    arguments yields a configuration similar to
    ``microsoft/beit-base-patch16-224-pt22k``.
    """

    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        # NOTE: list defaults are shared objects; kept for interface
        # compatibility — do not mutate them in place.
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    """ONNX export configuration for BEiT (image input only)."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the single pixel input.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating exported outputs against PyTorch.
        return 1e-4
671
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict

import requests
from slack_sdk import WebClient


client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])


def handle_test_results(test_results):
    """Parse a pytest summary string into ``(failed, success, time_spent)``.

    ``test_results`` is the final pytest status line, e.g.
    ``"== 2 failed, 30 passed in 0:06:17 =="``.
    """
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    """Map each failing doctest file to the first line of its error.

    Scans the ``failures_short`` pytest report: a ``_ [doctest]`` banner opens
    an error section, and the first following line that does not start with a
    line number is taken as the error summary for that file.
    """
    failures = {}
    file = None
    in_error = False

    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures


class Message:
    """Builds and posts the doc-test report to Slack."""

    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        # Keep only the first comma-separated chunk of the raw time string.
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        # NOTE: "failures" holds the failure *count* here, not a mapping.
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        """Total run time rendered as ``XhYmZs``."""
        time_spent = [self._time_spent]
        total_secs = 0

        for duration in time_spent:
            time_parts = duration.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        """Markdown section listing failed tests grouped by doc category."""
        line_length = 40
        # NOTE(review): reads the module-level `doc_test_results` global (set
        # in __main__), not `self.doc_test_results` — kept as in the original.
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        """The full Slack `blocks` payload, serialized as a JSON string."""
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        """Post a generic 'tests could not run' message to Slack."""
        payload = json.dumps(
            [
                {
                    "type": "section",
                    "text": {
                        "type": "plain_text",
                        "text": "There was an issue running the tests.",
                    },
                    "accessory": {
                        "type": "button",
                        "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                        "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                    },
                }
            ]
        )

        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(payload)}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        """Post the main report message and remember its thread timestamp."""
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        """Build the Slack blocks for one job's failure details reply."""
        failures_text = ""
        for key, value in failures.items():
            # Slack rejects overly long fields; truncate each error message.
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        """Post one threaded reply per failing category under the main post."""
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                # Stay under Slack's rate limits.
                time.sleep(1)


def get_job_links():
    """Fetch ``{job name: job URL}`` for the current GitHub Actions run."""
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # The first page was already fetched above; page through the rest.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        # Best-effort: the report is still useful without job links.
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    """Read every text file in artifact directory *name* into a dict keyed by
    the file's stem."""
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    """Discover downloaded artifact directories in the working directory."""

    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts


if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
671
1
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402


SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    """Tests for the `AutoConfig` factory: model-type resolution, local
    files, the Hub, custom registration and remote (trust_remote_code)
    configurations."""

    def setUp(self):
        # Don't block on the interactive remote-code prompt during tests.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))

            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            # Always clean the registry so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            config = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            config = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained(
                "hf-internal-testing/test_dynamic_model", trust_remote_code=False
            )

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained(
                "hf-internal-testing/test_dynamic_model", trust_remote_code=False
            )
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained(
                "hf-internal-testing/test_dynamic_model", trust_remote_code=True
            )
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
671
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build the GLUE/MRPC train and eval dataloaders.

    Tokenization runs on the main process first so the cached dataset is
    shared; padding strategy depends on the distributed setup.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train and evaluate BERT on MRPC with gradient accumulation."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
671
1
def solution(n: int = 100) -> int:
    """Project Euler problem 6: sum-square difference.

    Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers.

    >>> solution(10)
    2640
    >>> solution(100)
    25164150
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    sum_of_ints = n * (n + 1) // 2  # closed form for 1 + 2 + ... + n
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
671
from math import asin, atan, cos, radians, sin, sqrt, tan

# WGS-84 ellipsoid semi-major / semi-minor axes and mean radius, in metres.
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two (lat, lon) points.

    Latitudes are corrected for the Earth's flattening (geodetic ->
    reduced latitude) before applying the haversine formula.

    :param lat1: latitude of the first point, in degrees
    :param lon1: longitude of the first point, in degrees
    :param lat2: latitude of the second point, in degrees
    :param lon2: longitude of the second point, in degrees
    :return: distance along the sphere of radius ``RADIUS``, in metres
    """
    # Flattening of the ellipsoid, used to convert geodetic latitudes.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)

    # Haversine equation: h = sin²(Δφ/2) + cosφ₁·cosφ₂·sin²(Δλ/2)
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda

    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))

    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
1
from ...configuration_utils import PretrainedConfig


class __lowercase ( UpperCAmelCase_ ):
    """Configuration class for a BertGeneration model.

    Restored from an obfuscated dump: the original had duplicate parameter
    names (a SyntaxError) and assigned the config values to throwaway locals
    instead of instance attributes.
    """

    # Identifier used by the config registry (presumably; restored from the
    # original '''bert-generation''' literal — TODO confirm attribute name).
    model_type = '''bert-generation'''

    def __init__(
        self,
        vocab_size=5_0358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
671
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint into a PyTorch state dict.

    Restored name: the ``__main__`` block below calls
    ``convert_tf_checkpoint_to_pytorch``, which the obfuscated dump had
    renamed to ``A_`` (a NameError at runtime).

    Args:
        tf_checkpoint_path: path to the TF checkpoint.
        bert_config_file: JSON config describing the architecture.
        pytorch_dump_path: where to write the converted weights.
    """
    # Initialise PyTorch model from the JSON architecture description.
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--bert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained BERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
671
1
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class __lowercase ( unittest.TestCase ): """simple docstring""" @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): SCREAMING_SNAKE_CASE_: List[str] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small") SCREAMING_SNAKE_CASE_: Union[str, Any] = AutoTokenizer.from_pretrained("google/mt5-small") SCREAMING_SNAKE_CASE_: int = tokenizer("Hello there" , return_tensors="tf").input_ids SCREAMING_SNAKE_CASE_: int = tokenizer("Hi I am" , return_tensors="tf").input_ids SCREAMING_SNAKE_CASE_: List[str] = model(lowerCAmelCase__ , labels=lowerCAmelCase__).loss SCREAMING_SNAKE_CASE_: List[str] = -tf.math.reduce_mean(lowerCAmelCase__).numpy() SCREAMING_SNAKE_CASE_: str = -21.22_8168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2E-4)
671
import math


def is_prime(number):
    """Return True if ``number`` is prime (6k±1 trial division).

    Restored name: the solver below calls ``is_prime``, which the obfuscated
    dump had renamed to ``A_`` — where it was then shadowed by the solver
    itself, leaving ``is_prime`` undefined.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def A_ ( _UpperCAmelCase = 0.1 ):
    """Project Euler 58: smallest spiral side length for which the ratio of
    primes along both diagonals first drops below ``ratio``.

    Args:
        _UpperCAmelCase: target prime ratio (default 0.1).

    Returns:
        The side length (odd integer) of the square spiral.
    """
    ratio = _UpperCAmelCase
    primes = 3  # 3, 5, 7 on the first ring
    j = 3  # current side length
    while primes / (2 * j - 1) >= ratio:
        # The three new diagonal corners of the next ring (the fourth corner
        # is (j+2)**2, a perfect square, hence never prime).
        # Fix: the original added is_prime(<ratio>) instead of is_prime(i).
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
1
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
    """YituTech/conv-bert-medium-small""": (
        """https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
    ),
    """YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig ( UpperCAmelCase_ ):
    """Configuration class for a ConvBERT model.

    Restored from an obfuscated dump: both classes in this file were named
    ``__lowercase`` (the second shadowed the first), the ``__init__`` had
    duplicate parameter names, and the config values were assigned to dead
    locals rather than ``self``.
    """

    model_type = '''convbert'''

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig ( UpperCAmelCase_ ):
    """ONNX export config for ConvBERT: declares the dynamic input axes."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ])
671
import re


def split_input(str_):
    """Split on every non-alphanumeric separator, then whitespace-split each
    piece into words. Returns a list of word lists.

    Restored name: every function in this file had been renamed ``A_`` by
    the obfuscation, so each definition shadowed the previous one and all
    internal calls (``split_input``, ``to_simple_case``, ``to_complex_case``)
    were NameErrors.
    """
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_):
    """PascalCase the input: capitalize each word and join with nothing."""
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text, upper, separator):
    """Join the words of ``text`` with ``separator``, upper- or lower-cased.

    Returns ``"not valid string"`` for inputs that yield no words.
    """
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text):
    """PascalCase alias of :func:`to_simple_case`."""
    return to_simple_case(text)


def to_camel_case(text):
    """camelCase: PascalCase with the first character lower-cased."""
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text, upper):
    """snake_case (or SNAKE_CASE when ``upper``)."""
    return to_complex_case(text, upper, "_")


def to_kebab_case(text, upper):
    """kebab-case (or KEBAB-CASE when ``upper``)."""
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("""doctest""").testmod()
671
1
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
    """Tests for MgpstrProcessor (char tokenizer + ViT image processor).

    Restored from an obfuscated dump in which every method was named
    ``_SCREAMING_SNAKE_CASE`` (so only the last one survived class creation)
    and all locals were undefined at their use sites.
    """

    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """One random HWC PIL image for processor calls."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        # use_fast=False restored per upstream test — TODO confirm.
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        # (ValueError restored per upstream test — TODO confirm exception type)
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]
        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 5_0257)
        wp_input = torch.randn(1, 27, 3_0522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
671
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class __lowercase ( UpperCAmelCase_ ):
    """Configuration class for an UperNet semantic-segmentation model.

    Restored from an obfuscated dump: the config values were assigned to
    dead locals instead of ``self`` and ``to_dict`` discarded its results.
    """

    model_type = '''upernet'''

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],  # mutable default kept for interface compatibility
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # A plain dict is re-hydrated into the matching config class.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
671
1
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in parallel: 1 / sum(1/R_i).

    Restored from an obfuscated dump: both functions were named ``A_`` (the
    second shadowed the first), the reciprocal was taken of the whole list,
    and the error message was built but the raw argument was raised instead.

    Raises:
        ValueError: if any resistor is negative or zero.
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in series: sum(R_i).

    Raises:
        ValueError: if any resistor is negative.
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class __lowercase ( unittest.TestCase ):
    """Checks that an optimizer wrapped by ``accelerator.prepare`` remains
    picklable (regression test restored from an obfuscated dump in which the
    locals were undefined)."""

    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(F"Accelerated optimizer pickling failed with {e}")
        # Reset the shared singleton so later tests start clean.
        AcceleratorState._reset_state()
671
1
import io
import math
from typing import Dict, Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    get_image_size,
    infer_channel_dimension_format,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends


if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

# Repo that hosts the default font used when rendering VQA headers.
DEFAULT_FONT_PATH = """ybelkada/fonts"""


def _check_torch_version():
    """Raise if torch is present but older than the 1.11 minimum."""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Cut a CHW image tensor into non-overlapping patches.

    Returns a tensor of shape
    ``[1, rows, columns, patch_height * patch_width * channels]``.
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)


def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
):
    """Render ``text`` onto a new PIL image, wrapped to 80 characters."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


def render_header(image, header, **kwargs):
    """Render ``header`` text above ``image`` and return a numpy array."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image


class __lowercase ( UpperCAmelCase_ ):
    """Image processor for Pix2Struct: resizes each image to a patch grid and
    flattens it into ``max_patches`` (row, col, pixels) vectors.

    Restored from an obfuscated dump: the four module helpers above were all
    named ``A_`` and the method names below were collapsed, leaving
    ``render_header`` / ``extract_flattened_patches`` undefined at call sites.
    """

    model_input_names = ['''flattened_patches''']

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs):
        """Resize ``image`` to the best-fitting patch grid, extract patches,
        prepend 1-based row/col ids, and zero-pad to ``max_patches`` rows."""
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the patch grid fits inside max_patches.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        # align_corners=False / antialias=True restored per upstream — TODO confirm.
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)
        return result

    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        """Per-image standardization: (x - mean) / max(std, 1/sqrt(n))."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Full pipeline: RGB convert → (VQA header render) → normalize →
        flattened patches + attention mask, wrapped in a ``BatchFeature``."""
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )
        return encoded_outputs
671
from itertools import count


def A_(_UpperCAmelCase: int = 50) -> int:
    """Project Euler 115: return the least row length ``n`` for which
    F(m, n) — the number of ways to fill a row of ``n`` units with red
    blocks of length at least ``m`` separated by at least one black
    square (including the all-black row) — first exceeds one million.

    :param _UpperCAmelCase: minimum red-block length ``m`` (default 50).
    :return: the smallest ``n`` with F(m, n) > 1,000,000.
    """
    min_block_length = _UpperCAmelCase
    # fill_count_functions[n] == F(m, n); rows shorter than m hold no
    # block at all, so there is exactly one filling (all black).
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)  # the all-black row
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                # a block of `block_length` starting at `block_start`,
                # followed by one black square, then any valid suffix
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            # block placed flush against the end of the row
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_00_00_00:
            return n
    raise AssertionError("unreachable: itertools.count never terminates")


if __name__ == "__main__":
    print(f"{A_() = }")
671
1
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


lowerCAmelCase : Any = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    """Deprecated alias of :class:`GLPNImageProcessor`.

    Kept only for backward compatibility; instantiating it emits a
    ``FutureWarning`` and otherwise behaves exactly like the image processor.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Warn once per instantiation, then defer entirely to the parent.
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
671
def A_(_UpperCAmelCase: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit.

    The sign of the input is discarded (the digits of ``abs(n)`` are used).

    >>> A_(123)
    23
    >>> A_(-381)
    81

    :param _UpperCAmelCase: the integer whose digits are considered.
    :return: the maximum over all single-digit deletions.
    :raises TypeError: if the input is not an ``int``.
    """
    if not isinstance(_UpperCAmelCase, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(_UpperCAmelCase))
    # One candidate per digit position: the number with that digit removed.
    candidates = [
        num_str[:index] + num_str[index + 1 :] for index in range(len(num_str))
    ]
    return max(int(candidate) for candidate in candidates)


if __name__ == "__main__":
    __import__("doctest").testmod()
671
1
from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function lowerCAmelCase : List[str] = 1.0_54_57_18_17E-34 # unit of ℏ : J * s lowerCAmelCase : str = 3E8 # unit of c : m * s^-1 def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if (force, area, distance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if force < 0: raise ValueError("Magnitude of force can not be negative" ) if distance < 0: raise ValueError("Distance can not be negative" ) if area < 0: raise ValueError("Area can not be negative" ) if force == 0: SCREAMING_SNAKE_CASE_: Optional[Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_40 * (distance) ** 4 ) return {"force": force} elif area == 0: SCREAMING_SNAKE_CASE_: int = (2_40 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: SCREAMING_SNAKE_CASE_: Any = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_40 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("One and only one argument must be 0" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
671
from __future__ import annotations from collections.abc import Iterator from typing import Any class __lowercase : """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : Any): SCREAMING_SNAKE_CASE_: Any = data SCREAMING_SNAKE_CASE_: Node | None = None class __lowercase : """simple docstring""" def __init__( self : int): SCREAMING_SNAKE_CASE_: Dict = None SCREAMING_SNAKE_CASE_: str = None def __iter__( self : List[str]): SCREAMING_SNAKE_CASE_: Tuple = self.head while self.head: yield node.data SCREAMING_SNAKE_CASE_: List[str] = node.next if node == self.head: break def __len__( self : Dict): return sum(1 for _ in self) def __repr__( self : Dict): return "->".join(str(lowerCAmelCase__) for item in iter(self)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any): self.insert_nth(len(self) , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any): self.insert_nth(0 , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Any): if index < 0 or index > len(self): raise IndexError("list index out of range.") SCREAMING_SNAKE_CASE_: Any = Node(lowerCAmelCase__) if self.head is None: SCREAMING_SNAKE_CASE_: str = new_node # first node points itself SCREAMING_SNAKE_CASE_: Optional[Any] = new_node elif index == 0: # insert at head SCREAMING_SNAKE_CASE_: Optional[Any] = self.head SCREAMING_SNAKE_CASE_: str = new_node else: SCREAMING_SNAKE_CASE_: int = self.head for _ in range(index - 1): SCREAMING_SNAKE_CASE_: Optional[Any] = temp.next SCREAMING_SNAKE_CASE_: List[str] = temp.next SCREAMING_SNAKE_CASE_: int = new_node if index == len(self) - 1: # insert at tail SCREAMING_SNAKE_CASE_: Any = new_node def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return self.delete_nth(0) def _SCREAMING_SNAKE_CASE ( self : Any): return self.delete_nth(len(self) - 1) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : int = 0): if not 0 <= index < 
len(self): raise IndexError("list index out of range.") SCREAMING_SNAKE_CASE_: Optional[Any] = self.head if self.head == self.tail: # just one node SCREAMING_SNAKE_CASE_: List[str] = None elif index == 0: # delete head node SCREAMING_SNAKE_CASE_: int = self.tail.next.next SCREAMING_SNAKE_CASE_: Tuple = self.head.next else: SCREAMING_SNAKE_CASE_: Optional[int] = self.head for _ in range(index - 1): SCREAMING_SNAKE_CASE_: Any = temp.next SCREAMING_SNAKE_CASE_: Optional[Any] = temp.next SCREAMING_SNAKE_CASE_: int = temp.next.next if index == len(self) - 1: # delete at tail SCREAMING_SNAKE_CASE_: int = temp return delete_node.data def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return len(self) == 0 def A_ ( ): SCREAMING_SNAKE_CASE_: Dict = CircularLinkedList() assert len(_UpperCAmelCase ) == 0 assert circular_linked_list.is_empty() is True assert str(_UpperCAmelCase ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(_UpperCAmelCase ) == i circular_linked_list.insert_nth(_UpperCAmelCase , i + 1 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 6 ) ) circular_linked_list.insert_tail(6 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 7 ) ) circular_linked_list.insert_head(0 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(0 , 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(_UpperCAmelCase ) == 
"->".join(str(_UpperCAmelCase ) for i in range(1 , 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 , 3 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 6 ) ) assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
671
1
# Availability-gated re-exports for the Stable Diffusion ControlNet pipelines.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    # The PyTorch pipelines need both `transformers` and `torch` installed.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to placeholder objects that raise a helpful error on first use.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


# The Flax pipeline additionally requires JAX/Flax to be installed.
if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
671
from collections import defaultdict
from math import ceil, sqrt


def A_(t_limit: int = 1_00_00_00, n_limit: int = 10) -> int:
    """Project Euler 174: hollow square laminae.

    A lamina with outer width ``o`` and centred square hole of width ``h``
    (same parity, ``1 <= h <= o - 2``) uses ``o² - h²`` tiles. Count the
    tile totals ``t <= t_limit`` that can be formed by between 1 and
    ``n_limit`` distinct laminae.

    :param t_limit: maximum number of tiles available.
    :param n_limit: maximum number of distinct laminae per tile total
        (generalizes the previously hard-coded 10; default unchanged).
    :return: how many values of ``t`` admit 1..n_limit distinct laminae.
    """
    # count[t] = number of distinct laminae using exactly t tiles
    count: defaultdict = defaultdict(int)

    # The thinnest lamina (h = o - 2) uses 4o - 4 tiles, so o never needs
    # to exceed t_limit // 4 + 1.
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # smallest hole keeping the tile count within t_limit
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole must share the outer square's parity to stay centred
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{A_() = }")
671
1
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""
    Wrapper around several :class:`ControlNetModel` instances (Multi-ControlNet).

    The forward pass runs every ControlNet on its own conditioning image and
    scale, and sums the residuals so they can be consumed like a single
    ControlNet's output.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        # registered as a ModuleList so parameters/devices are tracked
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        """Run each ControlNet and element-wise sum their residual outputs.

        ``controlnet_cond`` and ``conditioning_scale`` are zipped with the
        wrapped nets, so all three sequences must have the same length.
        """
        for i, (image, scale, controlnet) in enumerate(
            zip(controlnet_cond, conditioning_scale, self.nets)
        ):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        """Save each wrapped ControlNet under ``save_directory``, ``..._1``, ``..._2``, ...

        The first net goes to ``save_directory`` itself so
        ``DiffusionPipeline.from_pretrained`` keeps working.
        """
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        """Load ControlNets from ``pretrained_model_path``, ``..._1``, ``..._2``, ...

        :raises ValueError: if no ControlNet directory is found at all.
        """
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
671
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Submodule name -> public names, consumed lazily by `_LazyModule` below.
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Submodule name -> public names, consumed lazily by `_LazyModule` below.
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
from collections import deque

demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Return the shortest path between ``start`` and ``goal`` via BFS.

    :param graph: adjacency mapping node -> list of neighbours.
    :return: the path as a list of nodes, or ``[]`` if no path exists.
    """
    # keep track of explored nodes
    explored = set()
    # each queue entry is a full path from start to some frontier node;
    # deque gives O(1) pops from the left (list.pop(0) is O(n))
    queue = deque([[start]])

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.popleft()
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on the shortest start->target path.

    :return: the distance, ``0`` if start == target, ``-1`` if the graph is
        empty, either endpoint is missing, or target is unreachable.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = deque([start])
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.popleft()
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
671
1
import argparse
from copy import deepcopy

import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    set_seed,
)


def get_args():
    """Parse fine-tuning hyper-parameters from the command line.

    NOTE(review): the argument `type=`/`default=` values were destroyed by
    name-mangling in the found source; they are reconstructed from the
    defaults' apparent types — confirm against the original script.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    """Accuracy over argmax class predictions, for `Trainer`'s eval loop."""
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    """Also evaluate on the *training* split at each evaluation point.

    Metrics from that pass are logged under the "train" prefix, making
    train/valid accuracy directly comparable.
    """

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            # copy first: the nested evaluate() call mutates `control`
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    """Fine-tune a 7-way complexity classifier on codeparrot/codecomplex."""
    args = get_args()
    set_seed(args.seed)

    # 80/10/10 train/test/valid split of the single published split
    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        # train only the classification head
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        # map source code to input ids and the complexity string to a class id
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
671
from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class __lowercase ( UpperCAmelCase_ ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : float): return 0.0 def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[str] = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) SCREAMING_SNAKE_CASE_: Dict = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = 5_12 SCREAMING_SNAKE_CASE_: str = [1] + [0] * (size - 1) SCREAMING_SNAKE_CASE_: Dict = [filter_type.process(_UpperCAmelCase ) for item in inputs] SCREAMING_SNAKE_CASE_: Optional[Any] = [0] * (samplerate - size) # zero-padding outputs += filler SCREAMING_SNAKE_CASE_: Tuple = np.abs(np.fft.fft(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE_: Optional[Any] = 20 * np.logaa(_UpperCAmelCase ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel("Frequency (Hz)" ) plt.xscale("log" ) # Display within reasonable bounds SCREAMING_SNAKE_CASE_: Any = get_bounds(_UpperCAmelCase , _UpperCAmelCase ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel("Gain (dB)" ) plt.plot(_UpperCAmelCase ) plt.show() def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = 5_12 SCREAMING_SNAKE_CASE_: Union[str, Any] = [1] + [0] * (size - 1) SCREAMING_SNAKE_CASE_: Dict = [filter_type.process(_UpperCAmelCase ) for item in inputs] SCREAMING_SNAKE_CASE_: int = [0] * (samplerate - size) # zero-padding outputs += filler SCREAMING_SNAKE_CASE_: Any = np.angle(np.fft.fft(_UpperCAmelCase ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel("Frequency (Hz)" ) plt.xscale("log" ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel("Phase shift (Radians)" ) 
plt.plot(np.unwrap(_UpperCAmelCase , -2 * pi ) ) plt.show()
671
1
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    """Extract job names -> job URLs for a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # the first page holds 100 entries; fetch the remaining pages
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(worflow_run_id, token=None):
    """Get artifact names -> archive download URLs for a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download an artifact zip to ``output_dir``.

    The API answers with a redirect; the real file lives at the `Location`
    header of the (un-followed) first response.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format).

    Returns a list of [error_line, error, failed_test, job_link] entries.
    """
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from every ``*.zip`` artifact in ``artifact_dir``."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors


def reduce_by_error(logs, error_filter=None):
    """Count each error, keeping the failing tests that produced it."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test path, or None for non-model tests."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    """Count errors per model (tests outside ``tests/models/`` are dropped)."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    """Render the per-error counts as a GitHub markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:1_00]} | |"
        lines.append(line)
    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    """Render the per-model counts as a GitHub markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        # the most common error for this model comes first in the dict
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
671
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """
    Return the area of the grid whose number of contained rectangles is
    closest to ``target`` (Project Euler problem 85).

    An a x b grid contains T(a) * T(b) rectangles, where T(n) = n(n+1)/2 is
    the n-th triangle number, so we search over products of triangle numbers.
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f'''{solution() = }''')
671
1
def A_(_UpperCAmelCase):
    """
    Reverse the order of whitespace-separated words in a string.

    >>> A_("I love Python")
    'Python love I'
    >>> A_("")
    ''
    """
    # Fix: the body previously read `input_str`, which is not the parameter name — NameError.
    # str.split() with no argument collapses runs of whitespace, so the output is
    # single-space separated regardless of the input spacing.
    return " ".join(_UpperCAmelCase.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: maps submodule name -> list of public names it provides.
# Fix: these entries were previously assigned to throwaway names, so `_import_structure`
# (which `_LazyModule` below reads) was never actually built.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]


if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Fix: the lazy module was previously bound to a throwaway name; it must replace this
    # module in sys.modules for lazy attribute resolution to take effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
1
import gc
import random
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


# NOTE(review): assignment targets and the mixin base classes in this file were mechanically
# mangled (duplicate parameter names, undefined bases, both classes named `__lowercase` so the
# second shadowed the first). Names below are restored from their visible uses; mixin order and
# test-method names follow the usual diffusers pipeline-test layout — confirm against upstream.
class StableUnCLIPImgaImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast (CPU-friendly) tests for the StableUnCLIP img2img pipeline."""

    pipeline_class = StableUnCLIPImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build a dict of tiny, randomly initialized pipeline components."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # the image embedding is concatenated with its noise-level embedding, hence * 2
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        """Build deterministic call kwargs (optionally converting the image to PIL)."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        # Only compare exact outputs on deterministic devices.
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        # NOTE(review): the flag value was lost in the mangled source; False matches the
        # usual diffusers pipeline-test convention — confirm.
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)


@slow
@require_torch_gpu
class StableUnCLIPImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against reference images."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
671
import argparse
import os.path as osp
import re

import torch
from safetensors.torch import load_file, save_file


# NOTE(review): assignment targets in this script were mechanically mangled (every write went
# to a throwaway name while reads kept the original names). Names below are restored from the
# visible reads and `__main__` call sites.

# =================#
# UNet Conversion #
# =================#

unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def convert_unet_state_dict(unet_state_dict):
    """Rename HF Diffusers UNet keys to original stable-diffusion keys."""
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# ================#
# VAE Conversion #
# ================#

vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]


def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    """Rename HF Diffusers VAE keys to original stable-diffusion keys and reshape attn weights."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}


def convert_text_enc_state_dict_vaa(text_enc_dict):
    """Convert a v2.x (OpenCLIP) text-encoder state dict, fusing q/k/v into in_proj tensors."""
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    # NOTE(review): the key suffixes for the fused tensors were lost in the mangled source;
    # ".in_proj_weight"/".in_proj_bias" follow the original conversion script — confirm.
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    # v1.x (CLIP) text encoders need no key renaming.
    return text_enc_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
671
1
def A_(column_title):
    """
    Convert an Excel-style uppercase column title to its 1-based column number.

    >>> A_("A")
    1
    >>> A_("Z")
    26
    >>> A_("AA")
    27
    """
    # Fix: the parameter and locals had been renamed away from the names the body reads
    # (`column_title`, `answer`, `index`, `power`, `value`), causing NameErrors.
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        # Each letter is a base-26 digit: ord("A") == 65, so "A" -> 1 ... "Z" -> 26.
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
671
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Canonical hub checkpoint -> config URL.  (FIX: previously this dict was assigned
# to the same throwaway name as the logger, shadowing it.)
XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class __lowercase(UpperCAmelCase_):
    """Configuration for XLM-ProphetNet encoder-decoder models.

    Holds vocabulary size, encoder/decoder depth and width, dropout rates and the
    ProphetNet-specific n-gram prediction hyper-parameters.

    FIX: the obfuscated version declared every ``__init__`` parameter with the same
    name (a SyntaxError) and mismatched the ``num_hidden_layers`` property/setter
    names; the parameter names below are restored from the attribute assignments in
    the body (defaults kept in the original order).
    """

    # Names below follow the `PretrainedConfig` contract: `model_type` registers the
    # config with AutoConfig, `keys_to_ignore_at_inference` is consumed by generation
    # utilities, and `attribute_map` aliases generic attribute names onto this
    # config's encoder-specific fields.
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        """Total depth: encoder layers plus decoder layers."""
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        # Depth is split across two attributes, so a single setter would be ambiguous.
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
671
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : Union[str, Any] = { """configuration_x_clip""": [ """XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XCLIPConfig""", """XCLIPTextConfig""", """XCLIPVisionConfig""", ], """processing_x_clip""": ["""XCLIPProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : str = [ """XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """XCLIPModel""", """XCLIPPreTrainedModel""", """XCLIPTextModel""", """XCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, ) from .processing_x_clip import XCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) else: import sys lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
671
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


# FIX: the two helpers below were both defined under the same throwaway name while
# being called as `squared_euclidean_distance` / `color_quantize`; the names used at
# the call sites are restored.
def squared_euclidean_distance(a, b):
    """Return the (len(a), len(b)) matrix of squared Euclidean distances between rows of `a` and rows of `b`."""
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    # ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, broadcast over all pairs.
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    """Map each RGB pixel of `x` to the index of its nearest cluster color."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class __lowercase(UpperCAmelCase_):
    """Image processor for ImageGPT: resize, normalize to [-1, 1] and (optionally)
    color-quantize pixels against a palette of color clusters, producing the
    flattened `input_ids` sequence the model consumes.

    FIX vs. the previous revision: all three methods shared one mangled name while
    the bodies called `self.resize` / `self.normalize`; `__init__`/`preprocess`
    parameters all shared one name (a SyntaxError); the `do_resize` validation had
    an operator-precedence bug; and `clusters` was wrapped in `np.array` before the
    `clusters is None` check, masking the missing-clusters error.
    """

    # Name of the tensor this processor produces for the model.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize `image` to `size` (a {"height", "width"} dict)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"Size dictionary must contain both height and width keys. Got {size.keys()}")
        # `resize` here resolves to the module-level image_transforms function.
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """Scale pixel values from [0, 255] to [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one or more images into a `BatchFeature` with `input_ids`.

        Per-call arguments override the defaults captured at construction time.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # FIX: parenthesized — `do_resize and size is None or resample is None`
        # raised even when resizing was disabled.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        # FIX: check for None *before* converting, so a missing palette is reported
        # instead of being silently wrapped into a 0-d object array.
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")
        clusters = np.array(clusters) if clusters is not None else None

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
671
1
import functools def A_ ( _UpperCAmelCase , _UpperCAmelCase ): # Validation if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for day in days ): raise ValueError("The parameter days should be a list of integers" ) if len(_UpperCAmelCase ) != 3 or not all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for cost in costs ): raise ValueError("The parameter costs should be a list of three integers" ) if len(_UpperCAmelCase ) == 0: return 0 if min(_UpperCAmelCase ) <= 0: raise ValueError("All days elements should be greater than 0" ) if max(_UpperCAmelCase ) >= 3_66: raise ValueError("All days elements should be less than 366" ) SCREAMING_SNAKE_CASE_: Union[str, Any] = set(_UpperCAmelCase ) @functools.cache def dynamic_programming(_UpperCAmelCase ) -> int: if index > 3_65: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
671
# DPR (Dense Passage Retrieval) tokenizers: thin BERT-tokenizer subclasses for the
# context encoder, question encoder and reader, plus the reader-specific logic that
# encodes (question, title, text) triples and decodes the best answer spans.
#
# NOTE(review): identifiers in this file appear machine-mangled.  Class attributes
# reference names that are not defined here (e.g. `VOCAB_FILES_NAMES`,
# `CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP`, the base class `UpperCAmelCase_`),
# three methods of the reader mixin share the single name `_SCREAMING_SNAKE_CASE`
# while their bodies call `self._get_best_spans`, and several `def` signatures
# repeat the same parameter name.  Confirm against the upstream
# `transformers/models/dpr/tokenization_dpr.py` before relying on this file.

import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer


lowerCAmelCase : Optional[int] = logging.get_logger(__name__)

# File-name conventions and per-checkpoint resource maps for the three tokenizers.
lowerCAmelCase : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

lowerCAmelCase : Tuple = {
    """vocab_file""": {
        """facebook/dpr-ctx_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-ctx_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-ctx_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-ctx_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}
lowerCAmelCase : Union[str, Any] = {
    """vocab_file""": {
        """facebook/dpr-question_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-question_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-question_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-question_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}
lowerCAmelCase : List[str] = {
    """vocab_file""": {
        """facebook/dpr-reader-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-reader-multiset-base""": (
            """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-reader-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-reader-multiset-base""": (
            """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}

# Maximum model input sizes and tokenizer init kwargs per checkpoint.
lowerCAmelCase : int = {
    """facebook/dpr-ctx_encoder-single-nq-base""": 512,
    """facebook/dpr-ctx_encoder-multiset-base""": 512,
}
lowerCAmelCase : int = {
    """facebook/dpr-question_encoder-single-nq-base""": 512,
    """facebook/dpr-question_encoder-multiset-base""": 512,
}
lowerCAmelCase : List[Any] = {
    """facebook/dpr-reader-single-nq-base""": 512,
    """facebook/dpr-reader-multiset-base""": 512,
}

lowerCAmelCase : Optional[int] = {
    """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCAmelCase : Optional[int] = {
    """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCAmelCase : List[str] = {
    """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}


# Context-encoder tokenizer: a BERT tokenizer configured with the maps above.
class __lowercase ( UpperCAmelCase_ ):
    """simple docstring"""

    _UpperCAmelCase : Any = VOCAB_FILES_NAMES
    _UpperCAmelCase : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase : List[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase : List[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


# Question-encoder tokenizer: same shape, different resource maps.
class __lowercase ( UpperCAmelCase_ ):
    """simple docstring"""

    _UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
    _UpperCAmelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase : Any = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase : str = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


# Output containers used by the reader span-decoding logic below.
lowerCAmelCase : List[Any] = collections.namedtuple(
    """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)

lowerCAmelCase : Optional[Any] = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])


lowerCAmelCase : int = R"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
              to the maximum acceptable input length for the model if that argument is not provided. This will
              truncate token by token, removing a token from the longest sequence in the pair if a pair of
              sequences (or a batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
              maximum acceptable input length for the model if that argument is not provided. This will only
              truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
              maximum acceptable input length for the model if that argument is not provided. This will only
              truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
                Controls the maximum length to use by one of the truncation/padding parameters.

                If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
                is required by one of the truncation/padding parameters. If the model has no specific maximum input
                length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(UpperCAmelCase_ )
class __lowercase :
    """simple docstring"""

    # Encodes a question against one or more (title, text) passages into the
    # `[CLS] question [SEP] title [SEP] text` layout described in the docstring above.
    def __call__(
        self : List[Any] ,
        lowerCAmelCase__ : int ,
        lowerCAmelCase__ : Optional[str] = None ,
        lowerCAmelCase__ : Optional[str] = None ,
        lowerCAmelCase__ : Union[bool, str] = False ,
        lowerCAmelCase__ : Union[bool, str] = False ,
        lowerCAmelCase__ : Optional[int] = None ,
        lowerCAmelCase__ : Optional[Union[str, TensorType]] = None ,
        lowerCAmelCase__ : Optional[bool] = None ,
        **lowerCAmelCase__ : Tuple ,
    ):
        # With no titles/texts this degenerates to plain question encoding.
        if titles is None and texts is None:
            return super().__call__(
                lowerCAmelCase__ ,
                padding=lowerCAmelCase__ ,
                truncation=lowerCAmelCase__ ,
                max_length=lowerCAmelCase__ ,
                return_tensors=lowerCAmelCase__ ,
                return_attention_mask=lowerCAmelCase__ ,
                **lowerCAmelCase__ ,
            )
        elif titles is None or texts is None:
            SCREAMING_SNAKE_CASE_: List[str] = titles if texts is None else texts
            return super().__call__(
                lowerCAmelCase__ ,
                lowerCAmelCase__ ,
                padding=lowerCAmelCase__ ,
                truncation=lowerCAmelCase__ ,
                max_length=lowerCAmelCase__ ,
                return_tensors=lowerCAmelCase__ ,
                return_attention_mask=lowerCAmelCase__ ,
                **lowerCAmelCase__ ,
            )
        SCREAMING_SNAKE_CASE_: Optional[int] = titles if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [titles]
        SCREAMING_SNAKE_CASE_: int = texts if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [texts]
        SCREAMING_SNAKE_CASE_: str = len(lowerCAmelCase__)
        # One question may be broadcast across all passages.
        SCREAMING_SNAKE_CASE_: Tuple = questions if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [questions] * n_passages
        if len(lowerCAmelCase__) != len(lowerCAmelCase__):
            raise ValueError(
                F"There should be as many titles than texts but got {len(lowerCAmelCase__)} titles and {len(lowerCAmelCase__)} texts.")
        SCREAMING_SNAKE_CASE_: Optional[Any] = super().__call__(lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__)["input_ids"]
        SCREAMING_SNAKE_CASE_: Union[str, Any] = super().__call__(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__)["input_ids"]
        # Concatenate question+title with text, truncating to max_length when asked.
        SCREAMING_SNAKE_CASE_: int = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(lowerCAmelCase__ , lowerCAmelCase__)
            ]
        }
        if return_attention_mask is not False:
            # 1 for real tokens, 0 for padding.
            SCREAMING_SNAKE_CASE_: Dict = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            SCREAMING_SNAKE_CASE_: int = attention_mask
        return self.pad(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__)

    # decode_best_spans: pick the `num_spans` best answer spans over the
    # `num_spans_per_passage` best passages ranked by relevance logits.
    # NOTE(review): mangled name — the body below calls `self._get_best_spans`.
    def _SCREAMING_SNAKE_CASE (
        self : Any ,
        lowerCAmelCase__ : BatchEncoding ,
        lowerCAmelCase__ : DPRReaderOutput ,
        lowerCAmelCase__ : int = 16 ,
        lowerCAmelCase__ : int = 64 ,
        lowerCAmelCase__ : int = 4 ,
    ):
        SCREAMING_SNAKE_CASE_: int = reader_input["input_ids"]
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = reader_output[:3]
        SCREAMING_SNAKE_CASE_: Tuple = len(lowerCAmelCase__)
        # Rank passages by relevance, best first.
        SCREAMING_SNAKE_CASE_: Union[str, Any] = sorted(range(lowerCAmelCase__) , reverse=lowerCAmelCase__ , key=relevance_logits.__getitem__)
        SCREAMING_SNAKE_CASE_: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            SCREAMING_SNAKE_CASE_: Optional[int] = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            SCREAMING_SNAKE_CASE_: str = sequence_ids.index(self.sep_token_id , 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                SCREAMING_SNAKE_CASE_: List[Any] = sequence_ids.index(self.pad_token_id)
            else:
                SCREAMING_SNAKE_CASE_: Dict = len(lowerCAmelCase__)
            SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] ,
                end_logits=end_logits[doc_id][passage_offset:sequence_len] ,
                max_answer_length=lowerCAmelCase__ ,
                top_spans=lowerCAmelCase__ ,
            )
            for start_index, end_index in best_spans:
                # Shift span indices back into the full-sequence coordinate frame.
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,
                        relevance_score=relevance_logits[doc_id] ,
                        doc_id=lowerCAmelCase__ ,
                        start_index=lowerCAmelCase__ ,
                        end_index=lowerCAmelCase__ ,
                        text=self.decode(sequence_ids[start_index : end_index + 1]) ,
                    ))
            if len(lowerCAmelCase__) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    # _get_best_spans: enumerate all candidate (start, end) spans up to
    # `max_answer_length`, rank by summed logits, keep `top_spans`
    # non-overlapping spans.  NOTE(review): mangled name, see above.
    def _SCREAMING_SNAKE_CASE (
        self : Union[str, Any] ,
        lowerCAmelCase__ : List[int] ,
        lowerCAmelCase__ : List[int] ,
        lowerCAmelCase__ : int ,
        lowerCAmelCase__ : int ,
    ):
        SCREAMING_SNAKE_CASE_: Any = []
        for start_index, start_score in enumerate(lowerCAmelCase__):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        SCREAMING_SNAKE_CASE_: Union[str, Any] = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__: x[1] , reverse=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[str] = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(F"Wrong span indices: [{start_index}:{end_index}]")
            SCREAMING_SNAKE_CASE_: int = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(F"Span is too long: {length} > {max_answer_length}")
            # Skip spans overlapping an already-chosen (higher-scoring) span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(lowerCAmelCase__) == top_spans:
                break
        return chosen_span_intervals


# Reader tokenizer: BERT tokenizer + the span-decoding mixin above.
@add_end_docstrings(UpperCAmelCase_ )
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ ):
    """simple docstring"""

    _UpperCAmelCase : Any = VOCAB_FILES_NAMES
    _UpperCAmelCase : Optional[Any] = READER_PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase : int = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION
    _UpperCAmelCase : str = ['''input_ids''', '''attention_mask''']
1
from collections.abc import Callable def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: float = a SCREAMING_SNAKE_CASE_: float = b if function(_UpperCAmelCase ) == 0: # one of the a or b is a root for the function return a elif function(_UpperCAmelCase ) == 0: return b elif ( function(_UpperCAmelCase ) * function(_UpperCAmelCase ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError("could not find root in given interval." ) else: SCREAMING_SNAKE_CASE_: float = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_UpperCAmelCase ) == 0: return mid elif function(_UpperCAmelCase ) * function(_UpperCAmelCase ) < 0: SCREAMING_SNAKE_CASE_: Optional[int] = mid else: SCREAMING_SNAKE_CASE_: str = mid SCREAMING_SNAKE_CASE_: List[Any] = start + (end - start) / 2.0 return mid def A_ ( _UpperCAmelCase ): return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1000)) import doctest doctest.testmod()
671
from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class __lowercase ( UpperCAmelCase_ ): """simple docstring""" _UpperCAmelCase : Optional[Any] = DistilBertTokenizer _UpperCAmelCase : Union[str, Any] = DistilBertTokenizerFast _UpperCAmelCase : int = True @slow def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: Optional[Any] = DistilBertTokenizer.from_pretrained("distilbert-base-uncased") SCREAMING_SNAKE_CASE_: Any = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
671
1
import argparse
from collections import defaultdict


# Table-of-contents file this checker operates on.
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Return a cleaned copy of a toctree section: duplicates merged, entries
    sorted alphabetically by title, with the "Overview" entry (if any) first.

    Args:
        doc_list: List of {"local": ..., "title": ...} toctree entries.

    Raises:
        ValueError: If one `local` appears with conflicting titles, or more
            than one "Overview" entry exists.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys.  FIX: the condition previously tested the literal
    # string "local" against `counts` (always true), which re-added every
    # duplicated entry on top of the merged one above.
    new_doc.extend([doc for doc in doc_list if "local" not in doc or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    """Verify (or with `overwrite=True`, fix) the ordering of the Schedulers toc."""
    # yaml is only needed when actually reading the toctree; importing lazily
    # keeps `clean_doc_toc` usable without PyYAML installed.
    import yaml

    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    """Verify (or with `overwrite=True`, fix) the ordering of the Pipelines toc."""
    import yaml  # lazy, see check_scheduler_doc

    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient lowerCAmelCase : List[Any] = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""]) def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = test_results.split(" " ) SCREAMING_SNAKE_CASE_: Tuple = 0 SCREAMING_SNAKE_CASE_: str = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. SCREAMING_SNAKE_CASE_: Optional[Any] = expressions[-2] if "=" in expressions[-1] else expressions[-1] for i, expression in enumerate(_UpperCAmelCase ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = {} SCREAMING_SNAKE_CASE_: Any = None SCREAMING_SNAKE_CASE_: Union[str, Any] = False for line in failures_short_lines.split("\n" ): if re.search(R"_ \[doctest\]" , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[Any] = True SCREAMING_SNAKE_CASE_: Dict = line.split(" " )[2] elif in_error and not line.split(" " )[0].isdigit(): SCREAMING_SNAKE_CASE_: Union[str, Any] = line SCREAMING_SNAKE_CASE_: List[str] = False return failures class __lowercase : """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict): SCREAMING_SNAKE_CASE_: Dict = title SCREAMING_SNAKE_CASE_: int = doc_test_results["time_spent"].split(",")[0] SCREAMING_SNAKE_CASE_: int = doc_test_results["success"] SCREAMING_SNAKE_CASE_: Optional[Any] = doc_test_results["failures"] SCREAMING_SNAKE_CASE_: Any = self.n_success + self.n_failures # Failures and success of the modeling tests SCREAMING_SNAKE_CASE_: Optional[int] = doc_test_results @property def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: int = [self._time_spent] SCREAMING_SNAKE_CASE_: 
List[Any] = 0 for time in time_spent: SCREAMING_SNAKE_CASE_: Union[str, Any] = time.split(":") # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(lowerCAmelCase__) == 1: SCREAMING_SNAKE_CASE_: Dict = [0, 0, time_parts[0]] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = int(time_parts[0]), int(time_parts[1]), float(time_parts[2]) total_secs += hours * 3600 + minutes * 60 + seconds SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return F"{int(lowerCAmelCase__)}h{int(lowerCAmelCase__)}m{int(lowerCAmelCase__)}s" @property def _SCREAMING_SNAKE_CASE ( self : List[Any]): return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return { "type": "section", "text": { "type": "plain_text", "text": F"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.", "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return { "type": "section", "text": { "type": "plain_text", "text": ( F"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in" F" {self.time}." 
), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: Optional[Any] = 40 SCREAMING_SNAKE_CASE_: List[str] = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(lowerCAmelCase__ , lowerCAmelCase__)} SCREAMING_SNAKE_CASE_: Tuple = "" for category, failures in category_failures.items(): if len(lowerCAmelCase__) == 0: continue if report != "": report += "\n\n" report += F"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n" report += "`" report += "`\n`".join(lowerCAmelCase__) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": F"The following examples had failures:\n\n\n{report}\n", }, } @property def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_: Optional[Any] = [self.header] if self.n_failures > 0: blocks.append(self.failures) if self.n_failures > 0: blocks.extend([self.category_failures]) if self.n_failures == 0: blocks.append(self.no_failures) return json.dumps(lowerCAmelCase__) @staticmethod def _SCREAMING_SNAKE_CASE ( ): SCREAMING_SNAKE_CASE_: List[str] = [ { "type": "section", "text": { "type": "plain_text", "text": "There was an issue running the tests.", }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } ] print("Sending the following payload") print(json.dumps({"blocks": json.loads(lowerCAmelCase__)})) client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." 
, blocks=lowerCAmelCase__ , ) def _SCREAMING_SNAKE_CASE ( self : Tuple): print("Sending the following payload") print(json.dumps({"blocks": json.loads(self.payload)})) SCREAMING_SNAKE_CASE_: Optional[Any] = F"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed." SCREAMING_SNAKE_CASE_: List[Any] = client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=lowerCAmelCase__ , ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any]): SCREAMING_SNAKE_CASE_: Dict = "" for key, value in failures.items(): SCREAMING_SNAKE_CASE_: str = value[:200] + " [Truncated]" if len(lowerCAmelCase__) > 250 else value failures_text += F"*{key}*\n_{value}_\n\n" SCREAMING_SNAKE_CASE_: Any = job_name SCREAMING_SNAKE_CASE_: List[Any] = {"type": "section", "text": {"type": "mrkdwn", "text": text}} if job_link is not None: SCREAMING_SNAKE_CASE_: Tuple = { "type": "button", "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True}, "url": job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def _SCREAMING_SNAKE_CASE ( self : Any): if self.thread_ts is None: raise ValueError("Can only post reply if a post has been made.") SCREAMING_SNAKE_CASE_: Tuple = self.doc_test_results.pop("job_link") self.doc_test_results.pop("failures") self.doc_test_results.pop("success") self.doc_test_results.pop("time_spent") SCREAMING_SNAKE_CASE_: Any = sorted(self.doc_test_results.items() , key=lambda lowerCAmelCase__: t[0]) for job, job_result in sorted_dict: if len(job_result["failures"]): SCREAMING_SNAKE_CASE_: Union[str, Any] = F"*Num failures* :{len(job_result['failed'])} \n" SCREAMING_SNAKE_CASE_: Optional[Any] = job_result["failures"] 
SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_reply_blocks(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , text=lowerCAmelCase__) print("Sending the following reply") print(json.dumps({"blocks": blocks})) client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F"Results for {job}" , blocks=lowerCAmelCase__ , thread_ts=self.thread_ts["ts"] , ) time.sleep(1) def A_ ( ): SCREAMING_SNAKE_CASE_: Tuple = os.environ["GITHUB_RUN_ID"] SCREAMING_SNAKE_CASE_: Any = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100" SCREAMING_SNAKE_CASE_: List[Any] = requests.get(_UpperCAmelCase ).json() SCREAMING_SNAKE_CASE_: Optional[Any] = {} try: jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} ) SCREAMING_SNAKE_CASE_: Any = math.ceil((result["total_count"] - 1_00) / 1_00 ) for i in range(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = requests.get(url + f"&page={i + 2}" ).json() jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} ) return jobs except Exception as e: print("Unknown error, could not fetch links." , _UpperCAmelCase ) return {} def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[Any] = {} if os.path.exists(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[str] = os.listdir(_UpperCAmelCase ) for file in files: try: with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE_: Dict = f.read() except UnicodeDecodeError as e: raise ValueError(f"Could not open {os.path.join(_UpperCAmelCase , _UpperCAmelCase )}." 
) from e return _artifact def A_ ( ): class __lowercase : """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : str): SCREAMING_SNAKE_CASE_: Dict = name SCREAMING_SNAKE_CASE_: List[str] = [] def __str__( self : Optional[Any]): return self.name def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : str): self.paths.append({"name": self.name, "path": path}) SCREAMING_SNAKE_CASE_: Dict[str, Artifact] = {} SCREAMING_SNAKE_CASE_: List[Any] = filter(os.path.isdir , os.listdir() ) for directory in directories: SCREAMING_SNAKE_CASE_: Dict = directory if artifact_name not in _available_artifacts: SCREAMING_SNAKE_CASE_: Tuple = Artifact(_UpperCAmelCase ) _available_artifacts[artifact_name].add_path(_UpperCAmelCase ) return _available_artifacts if __name__ == "__main__": lowerCAmelCase : Tuple = get_job_links() lowerCAmelCase : Optional[Any] = retrieve_available_artifacts() lowerCAmelCase : Any = collections.OrderedDict( [ ("""*.py""", """API Examples"""), ("""*.md""", """MD Examples"""), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' lowerCAmelCase : int = { v: { """failed""": [], """failures""": {}, } for v in docs.values() } # Link to the GitHub Action job lowerCAmelCase : Optional[int] = github_actions_job_links.get("""run_doctests""") lowerCAmelCase : List[Any] = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0] lowerCAmelCase : Any = retrieve_artifact(artifact_path["""name"""]) if "stats" in artifact: lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = handle_test_results(artifact["""stats"""]) lowerCAmelCase : List[str] = failed lowerCAmelCase : Any = success lowerCAmelCase : Dict = time_spent[1:-1] + """, """ lowerCAmelCase : str = extract_first_line_failure(artifact["""failures_short"""]) for line in artifact["summary_short"].split("""\n"""): if re.search("""FAILED""", line): 
lowerCAmelCase : Tuple = line.replace("""FAILED """, """""") lowerCAmelCase : str = line.split()[0].replace("""\n""", """""") if "::" in line: lowerCAmelCase , lowerCAmelCase : Optional[int] = line.split("""::""") else: lowerCAmelCase , lowerCAmelCase : str = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): lowerCAmelCase : str = docs[file_regex] doc_test_results[category]["failed"].append(test) lowerCAmelCase : str = all_failures[test] if test in all_failures else """N/A""" lowerCAmelCase : Any = failure break lowerCAmelCase : Union[str, Any] = Message("""🤗 Results of the doc tests.""", doc_test_results) message.post() message.post_reply()
671
1
def A_(img, level):
    """Return a copy of *img* with its contrast shifted by *level*.

    Uses the standard linear contrast formula: each channel value ``c`` is
    re-centred around the mid-point 128 and scaled by
    ``factor = 259 * (level + 255) / (255 * (259 - level))``.

    Args:
        img: anything exposing PIL's ``Image.point(fn)`` interface.
        level: contrast shift in ``-255..255`` (0 leaves the image unchanged;
            255 would divide by zero and is out of range).

    Returns:
        The image produced by ``img.point`` with the per-channel mapping applied.
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c):
        # Scale the distance from mid-grey (128); int() truncates toward zero.
        return int(128 + factor * (c - 128))

    return img.point(contrast)


# The script body below (and external callers) use the descriptive name;
# bind both so either spelling works.
change_contrast = A_

if __name__ == "__main__":
    # Pillow is only needed when run as a script; importing it lazily keeps
    # the module importable (and testable) without Pillow installed.
    from PIL import Image

    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
    cont_img.save("image_data/lena_high_contrast.png", format="png")
671
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType

########################################################################
# A fully working simple example using Accelerate with gradient
# accumulation: trains BERT-base on GLUE MRPC on single CPU/GPU,
# multi-GPU, TPU, and fp16/fp32, all with the same script.
# See https://github.com/huggingface/accelerate/tree/main/examples
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """Build the MRPC train/eval dataloaders.

    Args:
        accelerator (Accelerator): used to decide padding strategy and to
            serialize dataset preprocessing across processes.
        batch_size (int): per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader) tuple.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the tokenizer to every split, main process first so the cached
    # result is reused by the other processes instead of recomputed.
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # Rename 'label' to 'labels', the name expected by transformers models.
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16.
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train and evaluate BERT on MRPC with gradient accumulation.

    Args:
        config (dict): hyper-parameters ("lr", "num_epochs", "seed", "batch_size").
        args: parsed CLI namespace (mixed_precision, gradient_accumulation_steps, cpu).
    """
    # For testing only: shorten the run under the mocked-dataloader harness.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (built here so the seed also controls new weights init).
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # Manual device placement must happen before optimizer creation or
    # training will not work on TPU.
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything; no specific order, just unpack in the same order.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            batch.to(accelerator.device)
            # New code #
            # The `accumulate` context manager performs gradient accumulation;
            # optimizer/scheduler steps are internally skipped on non-sync steps.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    """Parse CLI arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
671
1
import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets lowerCAmelCase : Union[str, Any] = """\ @inproceedings{popovic-2015-chrf, title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\", author = \"Popovi{\'c}, Maja\", booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\", month = sep, year = \"2015\", address = \"Lisbon, Portugal\", publisher = \"Association for Computational Linguistics\", url = \"https://aclanthology.org/W15-3049\", doi = \"10.18653/v1/W15-3049\", pages = \"392--395\", } @inproceedings{popovic-2017-chrf, title = \"chr{F}++: words helping character n-grams\", author = \"Popovi{\'c}, Maja\", booktitle = \"Proceedings of the Second Conference on Machine Translation\", month = sep, year = \"2017\", address = \"Copenhagen, Denmark\", publisher = \"Association for Computational Linguistics\", url = \"https://aclanthology.org/W17-4770\", doi = \"10.18653/v1/W17-4770\", pages = \"612--618\", } @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ lowerCAmelCase : str = """\ ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches, and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation that is already present in sacrebleu. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu's required input format. 
See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information. """ lowerCAmelCase : str = """ Produces ChrF(++) scores for hypotheses given reference translations. Args: predictions (list of str): The predicted sentences. references (list of list of str): The references. There should be one reference sub-list for each prediction sentence. char_order (int): Character n-gram order. Defaults to `6`. word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`. beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`. lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`. whitespace (bool): If `True`, include whitespaces when extracting character n-grams. eps_smoothing (bool): If `True`, applies epsilon smoothing similar to reference chrF++.py, NLTK and Moses implementations. If `False`, it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`. Returns: 'score' (float): The chrF (chrF++) score, 'char_order' (int): The character n-gram order, 'word_order' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++, 'beta' (int): Determine the importance of recall w.r.t precision Examples: Example 1--a simple example of calculating chrF: >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"] >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]] >>> chrf = datasets.load_metric(\"chrf\") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2} Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF: >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"] >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]] >>> chrf = datasets.load_metric(\"chrf\") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2) >>> print(results) {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2} Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case: >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"] >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]] >>> chrf = datasets.load_metric(\"chrf\") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2, ... 
lowercase=True) >>> print(results) {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowercase ( datasets.Metric ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : str): if version.parse(scb.__version__) < version.parse("1.4.12"): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`.") return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence"), "references": datasets.Sequence(datasets.Value("string" , id="sequence") , id="references"), }) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[ "https://github.com/m-popovic/chrF", ] , ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int = CHRF.CHAR_ORDER , lowerCAmelCase__ : int = CHRF.WORD_ORDER , lowerCAmelCase__ : int = CHRF.BETA , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , ): SCREAMING_SNAKE_CASE_: int = len(references[0]) if any(len(lowerCAmelCase__) != references_per_prediction for refs in references): raise ValueError("Sacrebleu requires the same number of references for each prediction") SCREAMING_SNAKE_CASE_: str = [[refs[i] for refs in references] for i in range(lowerCAmelCase__)] SCREAMING_SNAKE_CASE_: Optional[Any] = CHRF(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = sb_chrf.corpus_score(lowerCAmelCase__ , lowerCAmelCase__) return { "score": output.score, 
"char_order": output.char_order, "word_order": output.word_order, "beta": output.beta, }
671
from math import asin, atan, cos, radians, sin, sqrt, tan

# WGS-84 ellipsoid constants (metres).
AXIS_A = 6378137.0       # equatorial radius (semi-major axis)
AXIS_B = 6356752.314245  # polar radius (semi-minor axis)
RADIUS = 6378137         # radius used for the spherical (great-circle) step


def A_(lat1, lon1, lat2, lon2):
    """Return the haversine distance in metres between two (lat, lon) points.

    Geodetic latitudes are first reduced onto the WGS-84 ellipsoid via the
    flattening factor, then the spherical haversine formula is applied with
    ``RADIUS`` as the sphere radius.

    Args:
        lat1, lon1: first point, decimal degrees.
        lat2, lon2: second point, decimal degrees.

    >>> A_(10.0, 20.0, 10.0, 20.0)
    0.0
    """
    # Reduce latitudes to account for the Earth's flattening.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)

    # Haversine equation.
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda

    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
1
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase : Tuple = logging.get_logger(__name__) lowerCAmelCase : Optional[Any] = ["""model.decoder.embed_positions.weights"""] def A_ ( _UpperCAmelCase ): if "emb" in name: SCREAMING_SNAKE_CASE_: Optional[int] = name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: SCREAMING_SNAKE_CASE_: Optional[int] = name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: SCREAMING_SNAKE_CASE_: Tuple = name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: SCREAMING_SNAKE_CASE_: str = name.replace("linear1" , "fc1" ) if "linear2" in name: SCREAMING_SNAKE_CASE_: Optional[Any] = name.replace("linear2" , "fc2" ) if "norm1" in name: SCREAMING_SNAKE_CASE_: List[str] = name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: SCREAMING_SNAKE_CASE_: Dict = name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: SCREAMING_SNAKE_CASE_: Tuple = name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: SCREAMING_SNAKE_CASE_: Optional[int] = name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: SCREAMING_SNAKE_CASE_: List[str] = name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: SCREAMING_SNAKE_CASE_: Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[str] = list(state_dict.keys() ) SCREAMING_SNAKE_CASE_: Dict = {} for key 
in keys: SCREAMING_SNAKE_CASE_: int = state_dict.pop(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: str = rename_keys(_UpperCAmelCase ) if "in_proj_weight" in key: # split fused qkv proj SCREAMING_SNAKE_CASE_: List[str] = val[:hidden_size, :] SCREAMING_SNAKE_CASE_: Optional[Any] = val[hidden_size : 2 * hidden_size, :] SCREAMING_SNAKE_CASE_: Optional[int] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: SCREAMING_SNAKE_CASE_: str = val else: SCREAMING_SNAKE_CASE_: str = val return state_dict, enc_dec_proj_state_dict def A_ ( _UpperCAmelCase ): if checkpoint == "small": # default config values SCREAMING_SNAKE_CASE_: Any = 10_24 SCREAMING_SNAKE_CASE_: Union[str, Any] = 24 SCREAMING_SNAKE_CASE_: List[str] = 16 elif checkpoint == "medium": SCREAMING_SNAKE_CASE_: Optional[Any] = 15_36 SCREAMING_SNAKE_CASE_: int = 48 SCREAMING_SNAKE_CASE_: Any = 24 elif checkpoint == "large": SCREAMING_SNAKE_CASE_: int = 20_48 SCREAMING_SNAKE_CASE_: List[Any] = 48 SCREAMING_SNAKE_CASE_: List[Any] = 32 else: raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." 
) SCREAMING_SNAKE_CASE_: int = MusicgenDecoderConfig( hidden_size=_UpperCAmelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , ) return config @torch.no_grad() def A_ ( _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="cpu" ): SCREAMING_SNAKE_CASE_: int = MusicGen.get_pretrained(_UpperCAmelCase , device=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[Any] = decoder_config_from_checkpoint(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Any = fairseq_model.lm.state_dict() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = rename_state_dict( _UpperCAmelCase , hidden_size=decoder_config.hidden_size ) SCREAMING_SNAKE_CASE_: Any = TaEncoderModel.from_pretrained("t5-base" ) SCREAMING_SNAKE_CASE_: Optional[Any] = EncodecModel.from_pretrained("facebook/encodec_32khz" ) SCREAMING_SNAKE_CASE_: Tuple = MusicgenForCausalLM(_UpperCAmelCase ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = decoder.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: raise ValueError(f"Missing key(s) in state_dict: {missing_keys}" ) if len(_UpperCAmelCase ) > 0: raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}" ) # init the composite model SCREAMING_SNAKE_CASE_: Dict = MusicgenForConditionalGeneration(text_encoder=_UpperCAmelCase , audio_encoder=_UpperCAmelCase , decoder=_UpperCAmelCase ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(_UpperCAmelCase ) # check we can do a forward pass SCREAMING_SNAKE_CASE_: Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) SCREAMING_SNAKE_CASE_: List[str] = input_ids.reshape(2 * 4 , 
-1 ) with torch.no_grad(): SCREAMING_SNAKE_CASE_: List[Any] = model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase ).logits if logits.shape != (8, 1, 20_48): raise ValueError("Incorrect shape for logits" ) # now construct the processor SCREAMING_SNAKE_CASE_: Dict = AutoTokenizer.from_pretrained("t5-base" ) SCREAMING_SNAKE_CASE_: Any = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) SCREAMING_SNAKE_CASE_: List[Any] = MusicgenProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) # set the appropriate bos/pad token ids SCREAMING_SNAKE_CASE_: Optional[int] = 20_48 SCREAMING_SNAKE_CASE_: str = 20_48 # set other default generation config params SCREAMING_SNAKE_CASE_: Optional[int] = int(30 * audio_encoder.config.frame_rate ) SCREAMING_SNAKE_CASE_: str = True SCREAMING_SNAKE_CASE_: List[str] = 3.0 if pytorch_dump_folder is not None: Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}" ) model.save_pretrained(_UpperCAmelCase ) processor.save_pretrained(_UpperCAmelCase ) if repo_id: logger.info(f"Pushing model {checkpoint} to {repo_id}" ) model.push_to_hub(_UpperCAmelCase ) processor.push_to_hub(_UpperCAmelCase ) if __name__ == "__main__": lowerCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint""", default="""small""", type=str, help="""Checkpoint size of the MusicGen model you'd like to convert. 
Can be one of: `['small', 'medium', 'large']`.""", ) parser.add_argument( """--pytorch_dump_folder""", required=True, default=None, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) parser.add_argument( """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda.""" ) lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
671
import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # Initialise PyTorch model SCREAMING_SNAKE_CASE_: List[Any] = BertConfig.from_json_file(_UpperCAmelCase ) print(f"Building PyTorch model from configuration: {config}" ) SCREAMING_SNAKE_CASE_: Tuple = BertForPreTraining(_UpperCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_bert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}" ) torch.save(model.state_dict() , _UpperCAmelCase ) if __name__ == "__main__": lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--bert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained BERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) lowerCAmelCase : Optional[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
671
1
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , ): SCREAMING_SNAKE_CASE_: Tuple = {} if train_file is not None: SCREAMING_SNAKE_CASE_: Optional[Any] = [train_file] if eval_file is not None: SCREAMING_SNAKE_CASE_: Optional[int] = [eval_file] if test_file is not None: SCREAMING_SNAKE_CASE_: List[Any] = [test_file] SCREAMING_SNAKE_CASE_: List[Any] = datasets.load_dataset("csv" , data_files=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] = list(ds[list(files.keys() )[0]].features.keys() ) SCREAMING_SNAKE_CASE_: Optional[Any] = features_name.pop(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Optional[int] = list(set(ds[list(files.keys() )[0]][label_name] ) ) SCREAMING_SNAKE_CASE_: List[str] = {label: i for i, label in enumerate(_UpperCAmelCase )} SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.model_input_names SCREAMING_SNAKE_CASE_: Tuple = {} if len(_UpperCAmelCase ) == 1: for k in files.keys(): SCREAMING_SNAKE_CASE_: Union[str, Any] = ds[k].map( lambda _UpperCAmelCase : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" ) , batched=_UpperCAmelCase , ) elif len(_UpperCAmelCase ) == 2: for k in files.keys(): SCREAMING_SNAKE_CASE_: Optional[int] = ds[k].map( lambda _UpperCAmelCase : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , 
padding="max_length" , ) , batched=_UpperCAmelCase , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: SCREAMING_SNAKE_CASE_: int = {k: v for k, v in ex.items() if k in input_names} SCREAMING_SNAKE_CASE_: Union[str, Any] = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: SCREAMING_SNAKE_CASE_: List[Any] = {k: v for k, v in ex.items() if k in input_names} SCREAMING_SNAKE_CASE_: List[Any] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: SCREAMING_SNAKE_CASE_: Dict = {k: v for k, v in ex.items() if k in input_names} SCREAMING_SNAKE_CASE_: Tuple = labelaid[ex[label_name]] yield (d, label) SCREAMING_SNAKE_CASE_: Union[str, Any] = ( tf.data.Dataset.from_generator( _UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: SCREAMING_SNAKE_CASE_: Tuple = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) SCREAMING_SNAKE_CASE_: List[Any] = ( tf.data.Dataset.from_generator( _UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: SCREAMING_SNAKE_CASE_: str = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) SCREAMING_SNAKE_CASE_: Tuple = ( tf.data.Dataset.from_generator( _UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: SCREAMING_SNAKE_CASE_: int = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, 
test_ds, labelaid lowerCAmelCase : Tuple = logging.getLogger(__name__) @dataclass class __lowercase : """simple docstring""" _UpperCAmelCase : int = field(metadata={'''help''': '''Which column contains the label'''} ) _UpperCAmelCase : str = field(default=UpperCAmelCase_ , metadata={'''help''': '''The path of the training file'''} ) _UpperCAmelCase : Optional[str] = field(default=UpperCAmelCase_ , metadata={'''help''': '''The path of the development file'''} ) _UpperCAmelCase : Optional[str] = field(default=UpperCAmelCase_ , metadata={'''help''': '''The path of the test file'''} ) _UpperCAmelCase : int = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) _UpperCAmelCase : bool = field( default=UpperCAmelCase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) @dataclass class __lowercase : """simple docstring""" _UpperCAmelCase : str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) _UpperCAmelCase : Optional[str] = field( default=UpperCAmelCase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _UpperCAmelCase : Optional[str] = field( default=UpperCAmelCase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) _UpperCAmelCase : bool = field(default=UpperCAmelCase_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. 
_UpperCAmelCase : Optional[str] = field( default=UpperCAmelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) def A_ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. SCREAMING_SNAKE_CASE_: Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, " f"16-bits training: {training_args.fpaa}" ) logger.info(f"Training/evaluation parameters {training_args}" ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
SCREAMING_SNAKE_CASE_: str = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_UpperCAmelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) SCREAMING_SNAKE_CASE_: Dict = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_UpperCAmelCase ) , labelaid=_UpperCAmelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): SCREAMING_SNAKE_CASE_: int = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , ) def compute_metrics(_UpperCAmelCase ) -> Dict: SCREAMING_SNAKE_CASE_: Any = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer SCREAMING_SNAKE_CASE_: str = TFTrainer( model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , compute_metrics=_UpperCAmelCase , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation SCREAMING_SNAKE_CASE_: Dict = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) SCREAMING_SNAKE_CASE_: List[Any] = trainer.evaluate() SCREAMING_SNAKE_CASE_: int = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(_UpperCAmelCase , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(f" 
{key} = {value}" ) writer.write(f"{key} = {value}\n" ) results.update(_UpperCAmelCase ) return results if __name__ == "__main__": main()
671
import math def A_ ( _UpperCAmelCase ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A_ ( _UpperCAmelCase = 0.1 ): SCREAMING_SNAKE_CASE_: Union[str, Any] = 3 SCREAMING_SNAKE_CASE_: Optional[int] = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ): primes += is_prime(_UpperCAmelCase ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
671
1
def A_ ( _UpperCAmelCase ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError("Input must be an integer" ) if input_num <= 0: raise ValueError("Input must be positive" ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
671
import re def A_ ( _UpperCAmelCase ): return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )] def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: int = split_input(str_ ) return "".join( ["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): try: SCREAMING_SNAKE_CASE_: List[Any] = split_input(_UpperCAmelCase ) if upper: SCREAMING_SNAKE_CASE_: List[str] = "".join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: SCREAMING_SNAKE_CASE_: Optional[int] = "".join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def A_ ( _UpperCAmelCase ): return to_simple_case(_UpperCAmelCase ) def A_ ( _UpperCAmelCase ): try: SCREAMING_SNAKE_CASE_: Optional[int] = to_simple_case(_UpperCAmelCase ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def A_ ( _UpperCAmelCase , _UpperCAmelCase ): return to_complex_case(_UpperCAmelCase , _UpperCAmelCase , "_" ) def A_ ( _UpperCAmelCase , _UpperCAmelCase ): return to_complex_case(_UpperCAmelCase , _UpperCAmelCase , "-" ) if __name__ == "__main__": __import__("""doctest""").testmod()
671
1
from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class __lowercase : """simple docstring""" _UpperCAmelCase : int _UpperCAmelCase : int class __lowercase : """simple docstring""" def __init__( self : List[Any] , lowerCAmelCase__ : int): SCREAMING_SNAKE_CASE_: list[list[Edge]] = [[] for _ in range(lowerCAmelCase__)] SCREAMING_SNAKE_CASE_: Tuple = size def __getitem__( self : str , lowerCAmelCase__ : int): return iter(self._graph[vertex]) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]): return self._size def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int): if weight not in (0, 1): raise ValueError("Edge weight must be either 0 or 1.") if to_vertex < 0 or to_vertex >= self.size: raise ValueError("Vertex indexes must be in [0; size).") self._graph[from_vertex].append(Edge(lowerCAmelCase__ , lowerCAmelCase__)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int): SCREAMING_SNAKE_CASE_: Any = deque([start_vertex]) SCREAMING_SNAKE_CASE_: list[int | None] = [None] * self.size SCREAMING_SNAKE_CASE_: Dict = 0 while queue: SCREAMING_SNAKE_CASE_: Dict = queue.popleft() SCREAMING_SNAKE_CASE_: List[str] = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: SCREAMING_SNAKE_CASE_: Tuple = current_distance + edge.weight SCREAMING_SNAKE_CASE_: Union[str, Any] = distances[edge.destination_vertex] if ( isinstance(lowerCAmelCase__ , lowerCAmelCase__) and new_distance >= dest_vertex_distance ): continue SCREAMING_SNAKE_CASE_: List[Any] = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex) else: queue.append(edge.destination_vertex) if distances[finish_vertex] is None: raise ValueError("No path from start_vertex to finish_vertex.") return distances[finish_vertex] if __name__ == "__main__": 
import doctest doctest.testmod()
671
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) class __lowercase ( UpperCAmelCase_ ): """simple docstring""" _UpperCAmelCase : List[Any] = '''upernet''' def __init__( self : Any , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : List[str]=512 , lowerCAmelCase__ : Any=0.02 , lowerCAmelCase__ : str=[1, 2, 3, 6] , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Dict=0.4 , lowerCAmelCase__ : int=384 , lowerCAmelCase__ : Union[str, Any]=256 , lowerCAmelCase__ : Any=1 , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : List[str]=255 , **lowerCAmelCase__ : List[str] , ): super().__init__(**lowerCAmelCase__) if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.") SCREAMING_SNAKE_CASE_: Dict = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"]) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__): SCREAMING_SNAKE_CASE_: str = backbone_config.get("model_type") SCREAMING_SNAKE_CASE_: str = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE_: Tuple = config_class.from_dict(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = backbone_config SCREAMING_SNAKE_CASE_: Optional[Any] = hidden_size SCREAMING_SNAKE_CASE_: Dict = initializer_range SCREAMING_SNAKE_CASE_: Any = pool_scales SCREAMING_SNAKE_CASE_: Optional[Any] = use_auxiliary_head SCREAMING_SNAKE_CASE_: str = auxiliary_loss_weight SCREAMING_SNAKE_CASE_: List[Any] = auxiliary_in_channels SCREAMING_SNAKE_CASE_: Union[str, Any] = auxiliary_channels SCREAMING_SNAKE_CASE_: Dict = auxiliary_num_convs SCREAMING_SNAKE_CASE_: str = auxiliary_concat_input SCREAMING_SNAKE_CASE_: Dict = loss_ignore_index def _SCREAMING_SNAKE_CASE ( self : Tuple): SCREAMING_SNAKE_CASE_: Tuple = copy.deepcopy(self.__dict__) SCREAMING_SNAKE_CASE_: int = 
self.backbone_config.to_dict() SCREAMING_SNAKE_CASE_: Optional[int] = self.__class__.model_type return output
671
1
import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Dict = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = emb.weight.shape SCREAMING_SNAKE_CASE_: str = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Optional[int] = emb.weight.data return lin_layer def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = torch.load(_UpperCAmelCase , map_location="cpu" ) SCREAMING_SNAKE_CASE_: Optional[Any] = mam_aaa["args"] or mam_aaa["cfg"]["model"] SCREAMING_SNAKE_CASE_: Union[str, Any] = mam_aaa["model"] remove_ignore_keys_(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Dict = state_dict["encoder.embed_tokens.weight"].shape[0] SCREAMING_SNAKE_CASE_: Dict = MaMaaaConfig( vocab_size=_UpperCAmelCase , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , ) SCREAMING_SNAKE_CASE_: str = state_dict["decoder.embed_tokens.weight"] SCREAMING_SNAKE_CASE_: Optional[Any] = MaMaaaForConditionalGeneration(_UpperCAmelCase ) model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) 
SCREAMING_SNAKE_CASE_: List[str] = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCAmelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") lowerCAmelCase : Any = parser.parse_args() lowerCAmelCase : List[str] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß) model.save_pretrained(args.pytorch_dump_folder_path)
671
import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class __lowercase ( unittest.TestCase ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: List[str] = torch.nn.Linear(10 , 10) SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.optim.SGD(model.parameters() , 0.1) SCREAMING_SNAKE_CASE_: Any = Accelerator() SCREAMING_SNAKE_CASE_: List[str] = accelerator.prepare(lowerCAmelCase__) try: pickle.loads(pickle.dumps(lowerCAmelCase__)) except Exception as e: self.fail(F"Accelerated optimizer pickling failed with {e}") AcceleratorState._reset_state()
671
1
from __future__ import annotations def A_ ( _UpperCAmelCase , _UpperCAmelCase ): if b == 0: return (1, 0) ((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): Any = extended_euclid(_UpperCAmelCase , a % b ) SCREAMING_SNAKE_CASE_: List[Any] = a // b return (y, x - k * y) def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): ((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): Optional[Any] = extended_euclid(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: str = na * na SCREAMING_SNAKE_CASE_: Tuple = ra * x * na + ra * y * na return (n % m + m) % m def A_ ( _UpperCAmelCase , _UpperCAmelCase ): ((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): List[Any] = extended_euclid(_UpperCAmelCase , _UpperCAmelCase ) if b < 0: SCREAMING_SNAKE_CASE_: Optional[Any] = (b % n + n) % n return b def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = invert_modulo(_UpperCAmelCase , _UpperCAmelCase ), invert_modulo(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] = na * na SCREAMING_SNAKE_CASE_: Any = ra * x * na + ra * y * na return (n % m + m) % m if __name__ == "__main__": from doctest import testmod testmod(name="""chinese_remainder_theorem""", verbose=True) testmod(name="""chinese_remainder_theorem2""", verbose=True) testmod(name="""invert_modulo""", verbose=True) testmod(name="""extended_euclid""", verbose=True)
671
from itertools import count def A_ ( _UpperCAmelCase = 50 ): SCREAMING_SNAKE_CASE_: Union[str, Any] = [1] * min_block_length for n in count(_UpperCAmelCase ): fill_count_functions.append(1 ) for block_length in range(_UpperCAmelCase , n + 1 ): for block_start in range(n - block_length ): fill_count_functions[n] += fill_count_functions[ n - block_start - block_length - 1 ] fill_count_functions[n] += 1 if fill_count_functions[n] > 1_00_00_00: break return n if __name__ == "__main__": print(f'''{solution() = }''')
671
1
def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: int = len(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): SCREAMING_SNAKE_CASE_: str = True # sum is not zero and set is empty then false for i in range(1 , required_sum + 1 ): SCREAMING_SNAKE_CASE_: List[Any] = False for i in range(1 , arr_len + 1 ): for j in range(1 , required_sum + 1 ): if arr[i - 1] > j: SCREAMING_SNAKE_CASE_: Tuple = subset[i - 1][j] if arr[i - 1] <= j: SCREAMING_SNAKE_CASE_: int = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
671
def A_ ( _UpperCAmelCase ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise TypeError("only integers accepted as input" ) else: SCREAMING_SNAKE_CASE_: List[Any] = str(abs(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE_: Tuple = [list(_UpperCAmelCase ) for char in range(len(_UpperCAmelCase ) )] for index in range(len(_UpperCAmelCase ) ): num_transpositions[index].pop(_UpperCAmelCase ) return max( int("".join(list(_UpperCAmelCase ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__("""doctest""").testmod()
671
1
import inspect import unittest from transformers import RegNetConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import RegNetForImageClassification, RegNetModel from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __lowercase : """simple docstring""" def __init__( self : List[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple=3 , lowerCAmelCase__ : Any=32 , lowerCAmelCase__ : Optional[int]=3 , lowerCAmelCase__ : Optional[Any]=10 , lowerCAmelCase__ : Optional[Any]=[10, 20, 30, 40] , lowerCAmelCase__ : str=[1, 1, 2, 1] , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : List[Any]="relu" , lowerCAmelCase__ : Union[str, Any]=3 , lowerCAmelCase__ : List[Any]=None , ): SCREAMING_SNAKE_CASE_: Optional[int] = parent SCREAMING_SNAKE_CASE_: Union[str, Any] = batch_size SCREAMING_SNAKE_CASE_: List[Any] = image_size SCREAMING_SNAKE_CASE_: Tuple = num_channels SCREAMING_SNAKE_CASE_: List[str] = embeddings_size SCREAMING_SNAKE_CASE_: Dict = hidden_sizes SCREAMING_SNAKE_CASE_: int = depths SCREAMING_SNAKE_CASE_: Optional[Any] = is_training SCREAMING_SNAKE_CASE_: Optional[Any] = use_labels SCREAMING_SNAKE_CASE_: Dict = hidden_act SCREAMING_SNAKE_CASE_: int = num_labels SCREAMING_SNAKE_CASE_: Tuple = scope SCREAMING_SNAKE_CASE_: str = len(lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): SCREAMING_SNAKE_CASE_: Optional[Any] = floats_tensor([self.batch_size, self.num_channels, 
self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_: List[str] = None if self.use_labels: SCREAMING_SNAKE_CASE_: Dict = ids_tensor([self.batch_size] , self.num_labels) SCREAMING_SNAKE_CASE_: Any = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : Optional[int]): return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any]): SCREAMING_SNAKE_CASE_: Optional[int] = RegNetModel(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict): SCREAMING_SNAKE_CASE_: str = self.num_labels SCREAMING_SNAKE_CASE_: str = RegNetForImageClassification(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_: Optional[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): SCREAMING_SNAKE_CASE_: List[Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = config_and_inputs SCREAMING_SNAKE_CASE_: List[str] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" _UpperCAmelCase : List[Any] = (RegNetModel, RegNetForImageClassification) if 
is_torch_available() else () _UpperCAmelCase : int = ( {'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase : Union[str, Any] = False _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : List[str] = False _UpperCAmelCase : Any = False def _SCREAMING_SNAKE_CASE ( self : Tuple): SCREAMING_SNAKE_CASE_: int = RegNetModelTester(self) SCREAMING_SNAKE_CASE_: List[str] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : int): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self : str): return @unittest.skip(reason="RegNet does not use inputs_embeds") def _SCREAMING_SNAKE_CASE ( self : int): pass @unittest.skip(reason="RegNet does not support input and output embeddings") def _SCREAMING_SNAKE_CASE ( self : Any): pass def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_: int = model_class(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_: str = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_: List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): SCREAMING_SNAKE_CASE_: Any = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_: Optional[int] = model_class(config=lowerCAmelCase__) for name, module in model.named_modules(): if isinstance(lowerCAmelCase__ , (nn.BatchNormad, nn.GroupNorm)): self.assertTrue( torch.all(module.weight == 1) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) self.assertTrue( torch.all(module.bias == 0) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) def _SCREAMING_SNAKE_CASE ( self : Any): def check_hidden_states_output(lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any): SCREAMING_SNAKE_CASE_: List[Any] = model_class(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_: Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__)) SCREAMING_SNAKE_CASE_: List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(lowerCAmelCase__) , expected_num_stages + 1) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_: Tuple = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: SCREAMING_SNAKE_CASE_: Optional[int] = layer_type SCREAMING_SNAKE_CASE_: Union[str, Any] = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) # check that 
output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE_: int = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : List[Any]): SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple): for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_: Optional[Any] = RegNetModel.from_pretrained(lowerCAmelCase__) self.assertIsNotNone(lowerCAmelCase__) def A_ ( ): SCREAMING_SNAKE_CASE_: str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __lowercase ( unittest.TestCase ): """simple docstring""" @cached_property def _SCREAMING_SNAKE_CASE ( self : List[Any]): return ( AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): SCREAMING_SNAKE_CASE_: Any = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = self.default_image_processor SCREAMING_SNAKE_CASE_: Tuple = prepare_img() SCREAMING_SNAKE_CASE_: List[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt").to(lowerCAmelCase__) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_: Optional[Any] = model(**lowerCAmelCase__) # verify the logits SCREAMING_SNAKE_CASE_: Tuple = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([-0.4180, -1.5051, -3.4836]).to(lowerCAmelCase__) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4))
671
from __future__ import annotations from collections.abc import Iterator from typing import Any class __lowercase : """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : Any): SCREAMING_SNAKE_CASE_: Any = data SCREAMING_SNAKE_CASE_: Node | None = None class __lowercase : """simple docstring""" def __init__( self : int): SCREAMING_SNAKE_CASE_: Dict = None SCREAMING_SNAKE_CASE_: str = None def __iter__( self : List[str]): SCREAMING_SNAKE_CASE_: Tuple = self.head while self.head: yield node.data SCREAMING_SNAKE_CASE_: List[str] = node.next if node == self.head: break def __len__( self : Dict): return sum(1 for _ in self) def __repr__( self : Dict): return "->".join(str(lowerCAmelCase__) for item in iter(self)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any): self.insert_nth(len(self) , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any): self.insert_nth(0 , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Any): if index < 0 or index > len(self): raise IndexError("list index out of range.") SCREAMING_SNAKE_CASE_: Any = Node(lowerCAmelCase__) if self.head is None: SCREAMING_SNAKE_CASE_: str = new_node # first node points itself SCREAMING_SNAKE_CASE_: Optional[Any] = new_node elif index == 0: # insert at head SCREAMING_SNAKE_CASE_: Optional[Any] = self.head SCREAMING_SNAKE_CASE_: str = new_node else: SCREAMING_SNAKE_CASE_: int = self.head for _ in range(index - 1): SCREAMING_SNAKE_CASE_: Optional[Any] = temp.next SCREAMING_SNAKE_CASE_: List[str] = temp.next SCREAMING_SNAKE_CASE_: int = new_node if index == len(self) - 1: # insert at tail SCREAMING_SNAKE_CASE_: Any = new_node def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return self.delete_nth(0) def _SCREAMING_SNAKE_CASE ( self : Any): return self.delete_nth(len(self) - 1) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : int = 0): if not 0 <= index < 
len(self): raise IndexError("list index out of range.") SCREAMING_SNAKE_CASE_: Optional[Any] = self.head if self.head == self.tail: # just one node SCREAMING_SNAKE_CASE_: List[str] = None elif index == 0: # delete head node SCREAMING_SNAKE_CASE_: int = self.tail.next.next SCREAMING_SNAKE_CASE_: Tuple = self.head.next else: SCREAMING_SNAKE_CASE_: Optional[int] = self.head for _ in range(index - 1): SCREAMING_SNAKE_CASE_: Any = temp.next SCREAMING_SNAKE_CASE_: Optional[Any] = temp.next SCREAMING_SNAKE_CASE_: int = temp.next.next if index == len(self) - 1: # delete at tail SCREAMING_SNAKE_CASE_: int = temp return delete_node.data def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return len(self) == 0 def A_ ( ): SCREAMING_SNAKE_CASE_: Dict = CircularLinkedList() assert len(_UpperCAmelCase ) == 0 assert circular_linked_list.is_empty() is True assert str(_UpperCAmelCase ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(_UpperCAmelCase ) == i circular_linked_list.insert_nth(_UpperCAmelCase , i + 1 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 6 ) ) circular_linked_list.insert_tail(6 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 7 ) ) circular_linked_list.insert_head(0 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(0 , 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(_UpperCAmelCase ) == 
"->".join(str(_UpperCAmelCase ) for i in range(1 , 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 , 3 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 6 ) ) assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
671
1
def count_divisors(n: int) -> int:
    """Return the number of positive divisors of ``n`` via trial factorisation.

    >>> count_divisors(28)
    6
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        # A prime factor p^k contributes a factor (k + 1) to the count.
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:  # one leftover prime factor greater than sqrt(original n)
        n_divisors *= 2
    return n_divisors


def A_() -> int:
    """Project Euler 12: first triangle number with more than 500 divisors.

    The obfuscated original assigned every counter to a throwaway name while
    reading ``i``/``t_num``/``n_divisors`` (``NameError``); the working names
    are restored and the divisor count is split into :func:`count_divisors`.
    """
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i  # t_num is now the i-th triangular number
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(A_())
671
"""Project Euler 174: hollow square laminae.

Count the tile totals ``t <= t_limit`` that can be arranged into between
1 and ``n_limit`` distinct hollow square laminae.
"""
from collections import defaultdict
from math import ceil, sqrt


def A_(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Return how many ``t <= t_limit`` yield between 1 and ``n_limit`` laminae.

    Fixes vs. the obfuscated original: ``defaultdict`` was seeded with
    ``t_limit`` (not callable -> ``TypeError`` on first access) instead of
    ``int``, the hole-width lower bound was assigned to a throwaway name
    (``NameError``), and the ``n_limit`` parameter was ignored in favour of a
    hard-coded 10 (the default preserves that behaviour).
    """
    # count[t] = number of distinct laminae using exactly t tiles.
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # Smallest hole that keeps the tile count within t_limit.
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # The hole width must have the same parity as the outer width.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{A_() = }")
671
1
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase : Dict = logging.get_logger() @dataclass class __lowercase : """simple docstring""" _UpperCAmelCase : nn.Module _UpperCAmelCase : List[nn.Module] = field(default_factory=UpperCAmelCase_ ) _UpperCAmelCase : list = field(default_factory=UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Tensor): SCREAMING_SNAKE_CASE_: Tuple = len(list(m.modules())) == 1 or isinstance(lowerCAmelCase__ , nn.Convad) or isinstance(lowerCAmelCase__ , nn.BatchNormad) if has_not_submodules: self.traced.append(lowerCAmelCase__) def __call__( self : Union[str, Any] , lowerCAmelCase__ : Tensor): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook)) self.module(lowerCAmelCase__) [x.remove() for x in self.handles] return self @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda lowerCAmelCase__: len(list(x.state_dict().keys())) > 0 , self.traced)) @dataclass class __lowercase : """simple docstring""" _UpperCAmelCase : nn.Module _UpperCAmelCase : nn.Module _UpperCAmelCase : int = 0 _UpperCAmelCase : List = field(default_factory=UpperCAmelCase_ ) _UpperCAmelCase : List = field(default_factory=UpperCAmelCase_ ) def __call__( self : Any , lowerCAmelCase__ : Tensor): SCREAMING_SNAKE_CASE_: str = Tracker(self.dest)(lowerCAmelCase__).parametrized SCREAMING_SNAKE_CASE_: Dict = Tracker(self.src)(lowerCAmelCase__).parametrized 
SCREAMING_SNAKE_CASE_: str = list(filter(lambda lowerCAmelCase__: type(lowerCAmelCase__) not in self.src_skip , lowerCAmelCase__)) SCREAMING_SNAKE_CASE_: Dict = list(filter(lambda lowerCAmelCase__: type(lowerCAmelCase__) not in self.dest_skip , lowerCAmelCase__)) if len(lowerCAmelCase__) != len(lowerCAmelCase__): raise Exception( F"Numbers of operations are different. Source module has {len(lowerCAmelCase__)} operations while" F" destination module has {len(lowerCAmelCase__)}.") for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__): dest_m.load_state_dict(src_m.state_dict()) if self.verbose == 1: print(F"Transfered from={src_m} to={dest_m}") def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True ): print(f"Converting {name}..." ) with torch.no_grad(): SCREAMING_SNAKE_CASE_: str = timm.create_model(_UpperCAmelCase , pretrained=_UpperCAmelCase ).eval() SCREAMING_SNAKE_CASE_: str = ResNetForImageClassification(_UpperCAmelCase ).eval() SCREAMING_SNAKE_CASE_: Union[str, Any] = ModuleTransfer(src=_UpperCAmelCase , dest=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Tuple = torch.randn((1, 3, 2_24, 2_24) ) module_transfer(_UpperCAmelCase ) assert torch.allclose(from_model(_UpperCAmelCase ) , our_model(_UpperCAmelCase ).logits ), "The model logits don't match the original one." 
SCREAMING_SNAKE_CASE_: Optional[int] = f"resnet{'-'.join(name.split('resnet' ) )}" print(_UpperCAmelCase ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=_UpperCAmelCase , ) # we can use the convnext one SCREAMING_SNAKE_CASE_: Optional[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=_UpperCAmelCase , ) print(f"Pushed {checkpoint_name}" ) def A_ ( _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True ): SCREAMING_SNAKE_CASE_: Dict = "imagenet-1k-id2label.json" SCREAMING_SNAKE_CASE_: Tuple = 10_00 SCREAMING_SNAKE_CASE_: Tuple = (1, num_labels) SCREAMING_SNAKE_CASE_: List[str] = "huggingface/label-files" SCREAMING_SNAKE_CASE_: Any = num_labels SCREAMING_SNAKE_CASE_: List[Any] = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) , "r" ) ) SCREAMING_SNAKE_CASE_: Union[str, Any] = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_: List[str] = idalabel SCREAMING_SNAKE_CASE_: Dict = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_: Optional[int] = partial(_UpperCAmelCase , num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Optional[Any] = { "resnet18": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ), "resnet26": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ), "resnet34": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ), "resnet50": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ), "resnet101": ImageNetPreTrainedConfig( depths=[3, 4, 23, 
3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ), "resnet152": ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ), } if model_name: convert_weight_and_push(_UpperCAmelCase , names_to_config[model_name] , _UpperCAmelCase , _UpperCAmelCase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, expected_shape if __name__ == "__main__": lowerCAmelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) lowerCAmelCase : Dict = parser.parse_args() lowerCAmelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
671
"""Lazy import structure for the XLM model family.

The obfuscated original rebound a single throwaway name where the real file
builds ``_import_structure`` and registers the lazy module, so the final
``_LazyModule(...)`` call crashed on the undefined ``_import_structure``;
the standard transformers lazy-init layout is restored.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Submodules/classes that are importable regardless of installed backends.
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

# PyTorch-backed classes, exported only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

# TensorFlow-backed classes, exported only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    # Static type checkers and interactive help see the real imports.
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCAmelCase : Dict = logging.get_logger(__name__) def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = b.T SCREAMING_SNAKE_CASE_: Dict = np.sum(np.square(_UpperCAmelCase ) , axis=1 ) SCREAMING_SNAKE_CASE_: Tuple = np.sum(np.square(_UpperCAmelCase ) , axis=0 ) SCREAMING_SNAKE_CASE_: List[Any] = np.matmul(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Dict = aa[:, None] - 2 * ab + ba[None, :] return d def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: int = x.reshape(-1 , 3 ) SCREAMING_SNAKE_CASE_: Tuple = squared_euclidean_distance(_UpperCAmelCase , _UpperCAmelCase ) return np.argmin(_UpperCAmelCase , axis=1 ) class __lowercase ( UpperCAmelCase_ ): """simple docstring""" _UpperCAmelCase : int = ['''pixel_values'''] def __init__( self : Tuple , lowerCAmelCase__ : Optional[Union[List[List[int]], np.ndarray]] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : List[str] , ): super().__init__(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Any = size if size is not None else {"height": 256, "width": 256} SCREAMING_SNAKE_CASE_: Tuple = get_size_dict(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = np.array(lowerCAmelCase__) if clusters is not None else None SCREAMING_SNAKE_CASE_: Dict = do_resize SCREAMING_SNAKE_CASE_: str = size SCREAMING_SNAKE_CASE_: List[Any] = resample 
SCREAMING_SNAKE_CASE_: Optional[int] = do_normalize SCREAMING_SNAKE_CASE_: Dict = do_color_quantize def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ): SCREAMING_SNAKE_CASE_: List[str] = get_size_dict(lowerCAmelCase__) if "height" not in size or "width" not in size: raise ValueError(F"Size dictionary must contain both height and width keys. Got {size.keys()}") return resize( lowerCAmelCase__ , size=(size["height"], size["width"]) , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , ): SCREAMING_SNAKE_CASE_: str = rescale(image=lowerCAmelCase__ , scale=1 / 127.5 , data_format=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = image - 1 return image def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[List[List[int]], np.ndarray]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **lowerCAmelCase__ : Union[str, Any] , ): SCREAMING_SNAKE_CASE_: Tuple = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE_: Optional[int] = size if size is not None else self.size SCREAMING_SNAKE_CASE_: Dict = get_size_dict(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE_: int = do_normalize if do_normalize is not 
None else self.do_normalize SCREAMING_SNAKE_CASE_: List[str] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize SCREAMING_SNAKE_CASE_: Tuple = clusters if clusters is not None else self.clusters SCREAMING_SNAKE_CASE_: Optional[int] = np.array(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = make_list_of_images(lowerCAmelCase__) if not valid_images(lowerCAmelCase__): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray.") if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True.") if do_color_quantize and clusters is None: raise ValueError("Clusters must be specified if do_color_quantize is True.") # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE_: Union[str, Any] = [to_numpy_array(lowerCAmelCase__) for image in images] if do_resize: SCREAMING_SNAKE_CASE_: Optional[Any] = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__) for image in images] if do_normalize: SCREAMING_SNAKE_CASE_: str = [self.normalize(image=lowerCAmelCase__) for image in images] if do_color_quantize: SCREAMING_SNAKE_CASE_: Any = [to_channel_dimension_format(lowerCAmelCase__ , ChannelDimension.LAST) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) SCREAMING_SNAKE_CASE_: List[Any] = np.array(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = color_quantize(lowerCAmelCase__ , lowerCAmelCase__).reshape(images.shape[:-1]) # flatten to (batch_size, height*width) SCREAMING_SNAKE_CASE_: str = images.shape[0] SCREAMING_SNAKE_CASE_: Tuple = images.reshape(lowerCAmelCase__ , -1) # We need to convert back to a list of images to keep consistent behaviour across processors. 
SCREAMING_SNAKE_CASE_: str = list(lowerCAmelCase__) else: SCREAMING_SNAKE_CASE_: Dict = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__) for image in images] SCREAMING_SNAKE_CASE_: Optional[Any] = {"input_ids": images} return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__)
671
"""Breadth-first shortest paths on an unweighted, undirected graph.

The obfuscated original bound both functions to ``A_`` and lost the
``queue``/``explored``/``dist`` names its loops read, and the demo at the
bottom referenced the undefined names ``demo_graph``/``bfs_shortest_path``;
the working names are restored (``A_`` stays as an alias of the last
definition for any existing caller).
"""
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Return one shortest path from ``start`` to ``goal``, or ``[]`` if none.

    >>> bfs_shortest_path(demo_graph, "G", "D")
    ['G', 'C', 'A', 'B', 'D']
    """
    explored = set()
    # Keep track of all the partial paths still to be checked (FIFO).
    queue = [[start]]
    if start == goal:
        return [start]
    while queue:
        path = queue.pop(0)  # expand the oldest partial path first
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # Extend the path by each neighbour and enqueue the new path.
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                if neighbour == goal:
                    return new_path
            explored.add(node)
    # No path exists between the two nodes.
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest ``start``->``target`` path.

    Returns 0 when ``start == target`` and -1 when either node is missing
    from the graph or no path exists.

    >>> bfs_shortest_path_distance(demo_graph, "G", "D")
    4
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Distances from `start`; the target's distance starts unknown (-1).
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


# Backwards-compatible alias for the obfuscated name.
A_ = bfs_shortest_path_distance

if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
671
1
# NOTE(review): manim animation Scene that appears to visualize CPU/GPU/model
# memory blocks being created and filled. The chunk is flattened onto two
# physical lines and its identifiers are obfuscated: `lowerCAmelCase__` stands
# in for many *different* original arguments (directions such as RIGHT/DOWN,
# colors, mobjects) and `SCREAMING_SNAKE_CASE_` for every local binding, so
# several names below do not resolve (e.g. `cpu`, `gpu`, `model`,
# `cpu_left_col_base`, `cpu_targs`, `key`, `key_text`, `step_a` are read but
# never bound under those names). Left byte-identical — restore the original
# identifiers from the upstream source before attempting to run this.
from manim import * class __lowercase ( UpperCAmelCase_ ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: Optional[Any] = Rectangle(height=0.5 , width=0.5) SCREAMING_SNAKE_CASE_: str = Rectangle(height=0.46 , width=0.46).set_stroke(width=0) SCREAMING_SNAKE_CASE_: Any = [mem.copy() for i in range(6)] SCREAMING_SNAKE_CASE_: Any = [mem.copy() for i in range(6)] SCREAMING_SNAKE_CASE_: Any = VGroup(*lowerCAmelCase__).arrange(lowerCAmelCase__ , buff=0) SCREAMING_SNAKE_CASE_: Dict = VGroup(*lowerCAmelCase__).arrange(lowerCAmelCase__ , buff=0) SCREAMING_SNAKE_CASE_: str = VGroup(lowerCAmelCase__ , lowerCAmelCase__).arrange(lowerCAmelCase__ , buff=0) SCREAMING_SNAKE_CASE_: Dict = Text("CPU" , font_size=24) SCREAMING_SNAKE_CASE_: Dict = Group(lowerCAmelCase__ , lowerCAmelCase__).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__) cpu.move_to([-2.5, -0.5, 0]) self.add(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = [mem.copy() for i in range(1)] SCREAMING_SNAKE_CASE_: Dict = VGroup(*lowerCAmelCase__).arrange(lowerCAmelCase__ , buff=0) SCREAMING_SNAKE_CASE_: Union[str, Any] = Text("GPU" , font_size=24) SCREAMING_SNAKE_CASE_: Dict = Group(lowerCAmelCase__ , lowerCAmelCase__).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__) gpu.align_to(lowerCAmelCase__ , lowerCAmelCase__) gpu.set_x(gpu.get_x() - 1) self.add(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = [mem.copy() for i in range(6)] SCREAMING_SNAKE_CASE_: List[str] = VGroup(*lowerCAmelCase__).arrange(lowerCAmelCase__ , buff=0) SCREAMING_SNAKE_CASE_: List[str] = Text("Model" , font_size=24) SCREAMING_SNAKE_CASE_: str = Group(lowerCAmelCase__ , lowerCAmelCase__).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__) model.move_to([3, -1.0, 0]) self.play( Create(lowerCAmelCase__ , run_time=1) , Create(lowerCAmelCase__ , run_time=1) , Create(lowerCAmelCase__ , run_time=1) , ) SCREAMING_SNAKE_CASE_: Dict = MarkupText( F"First, an 
empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM." , font_size=24 , ) SCREAMING_SNAKE_CASE_: Optional[int] = Square(side_length=2.2) key.move_to([-5, 2, 0]) SCREAMING_SNAKE_CASE_: Tuple = MarkupText( F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , ) key_text.move_to([-5, 2.4, 0]) step_a.move_to([2, 2, 0]) self.play(Write(lowerCAmelCase__ , run_time=2.5) , Write(lowerCAmelCase__) , Write(lowerCAmelCase__)) self.add(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = [] SCREAMING_SNAKE_CASE_: Union[str, Any] = [] SCREAMING_SNAKE_CASE_: List[str] = [] for i, rect in enumerate(lowerCAmelCase__): SCREAMING_SNAKE_CASE_: Dict = Rectangle(height=0.46 , width=0.46).set_stroke(width=0.0).set_fill(lowerCAmelCase__ , opacity=0.7) cpu_target.move_to(lowerCAmelCase__) cpu_target.generate_target() SCREAMING_SNAKE_CASE_: Optional[int] = 0.46 / 4 SCREAMING_SNAKE_CASE_: str = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowerCAmelCase__) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCAmelCase__ , buff=0.0) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCAmelCase__ , buff=0.0) cpu_targs.append(lowerCAmelCase__) first_animations.append(rect.animate(run_time=0.5).set_stroke(lowerCAmelCase__)) second_animations.append(MoveToTarget(lowerCAmelCase__ , run_time=1.5)) self.play(*lowerCAmelCase__) self.play(*lowerCAmelCase__) self.wait()
671
"""Plot the frequency/phase response of an audio filter via its impulse response."""
from math import pi
from typing import Protocol

import numpy as np


class FilterType(Protocol):
    """Structural type for a filter: anything with a per-sample process()."""

    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n]; this stub returns silence."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple:
    """Return (lowest, highest) dB bounds over the positive-frequency bins.

    The DC bin and the bins at/above Nyquist are excluded; the range is
    clamped to at least [-20, 20] dB so a flat response still plots sensibly.
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's gain (dB) against frequency on a log axis."""
    # Lazy import so the numeric helpers above stay usable without matplotlib.
    import matplotlib.pyplot as plt

    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding to one FFT bin per Hz
    outputs += filler
    fft_db = 20 * np.log10(np.abs(np.fft.fft(outputs)))

    # Frequencies on log scale from 24 Hz to the Nyquist frequency.
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds.
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's phase shift (radians) against frequency on a log axis."""
    # Lazy import so the numeric helpers above stay usable without matplotlib.
    import matplotlib.pyplot as plt

    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding to one FFT bin per Hz
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 Hz to the Nyquist frequency.
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
671
1
class DisjointSet:
    """Union-find over indexed sets, tracking each set's size and the largest size.

    `set_counts[i]` gives the initial size of set i. After a merge the root's
    entry holds the combined size and the absorbed root's entry is zeroed.
    """

    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)  # size of the largest set seen so far
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets  # union-by-rank tree heights
        self.parents = list(range(num_sets))  # every set starts as its own root

    def merge(self, src: int, dst: int) -> bool:
        """Union the sets containing `src` and `dst`.

        Returns False when they are already in the same set, True otherwise.
        Uses union by rank and keeps `max_set` up to date.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            # Attach src's tree under dst's root (dst is at least as tall).
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            # Attach dst's tree under src's root.
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of `disj_set`, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]


# Backward-compatible alias for the obfuscated class name used elsewhere.
__lowercase = DisjointSet
671
"""Project Euler 85: grid whose rectangle count is closest to two million."""
from math import ceil, floor, sqrt


def solution(target: int = 2_000_000) -> int:
    """Return the area a*b of the grid containing a rectangle count closest to `target`.

    An a x b grid contains T(a) * T(b) rectangles, where T(n) = n(n+1)/2 is the
    n-th triangle number. For each candidate a we solve T(b) ~= target / T(a)
    with the quadratic formula and test the two nearest integer values of b.
    """
    # Triangle numbers, indexed so triangle_numbers[n] == T(n); the 1.1 factor
    # gives headroom so b_ceil never indexes past the end of the list.
    triangle_numbers: list[int] = [0]
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    best_product = 0  # rectangle count closest to target found so far
    area = 0  # a * b of the grid giving that count

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        # Real-valued b with T(a) * T(b) == target, from the quadratic formula.
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
671
1
"""Keep the OrderedDict auto-mappings in transformers' auto modules sorted."""
import argparse
import os
import re

# Directory holding the auto-mapping modules to keep sorted.
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# Matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# Matches the quoted identifier that opens each mapping entry.
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname: str, overwrite: bool = False):
    """Sort every OrderedDict mapping in `fname` by its string identifier.

    With overwrite=True the file is rewritten in place. Otherwise returns True
    when the file would change (and None when it is already sorted).
    """
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            # Entries sit 8 columns past the mapping assignment's indentation.
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping! Copy lines until the first entry.
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not.
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers.
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    """Sort the mappings in every .py file under PATH_TO_AUTO_MODULE.

    Raises ValueError listing the offending files when overwrite is False and
    at least one file is out of order.
    """
    fnames = [
        os.path.join(PATH_TO_AUTO_MODULE, f)
        for f in os.listdir(PATH_TO_AUTO_MODULE)
        if f.endswith(".py")
    ]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
671
# NOTE(review): lazy-import scaffolding for the Longformer model family (a
# Transformers-style __init__.py): a dict of submodule -> exported names is
# built up under backend-availability guards (tokenizers / torch / TF), then
# either the real imports run under TYPE_CHECKING or a _LazyModule is installed
# at runtime. Obfuscation collapsed the distinct bindings into repeated
# `lowerCAmelCase` assignments where `_import_structure` and its entries
# belong, so the final _LazyModule(...) call reads `_import_structure`, which
# is never bound under that name here. Left byte-identical pending restoration
# of the original identifiers from the upstream file.
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase : Optional[int] = { """configuration_longformer""": [ """LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongformerConfig""", """LongformerOnnxConfig""", ], """tokenization_longformer""": ["""LongformerTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[str] = ["""LongformerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Union[str, Any] = [ """LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongformerForMaskedLM""", """LongformerForMultipleChoice""", """LongformerForQuestionAnswering""", """LongformerForSequenceClassification""", """LongformerForTokenClassification""", """LongformerModel""", """LongformerPreTrainedModel""", """LongformerSelfAttention""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : int = [ """TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFLongformerForMaskedLM""", """TFLongformerForMultipleChoice""", """TFLongformerForQuestionAnswering""", """TFLongformerForSequenceClassification""", """TFLongformerForTokenClassification""", """TFLongformerModel""", """TFLongformerPreTrainedModel""", """TFLongformerSelfAttention""", ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
671
1
"""Count inversions in a sequence: brute force and divide-and-conquer versions."""


def count_inversions_bf(arr) -> int:
    """Count inversions (pairs i < j with arr[i] > arr[j]) by brute force, O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Return (sorted copy of `arr`, inversion count) via merge sort, O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    sorted_p, inversions_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    merged, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)

    return merged, inversions_p + inversions_q + cross_inversions


def _count_cross_inversions(p, q):
    """Merge sorted lists `p` and `q`, counting pairs (x in p, y in q) with x > y."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # If P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P):
            # these are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    """Demonstrate that both counters agree on a few inputs."""
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
671
# NOTE(review): script converting a HF Diffusers checkpoint (UNet + VAE + text
# encoder) back into the original Stable Diffusion state-dict layout, reading
# safetensors or .bin weights and writing a combined checkpoint. The chunk is
# flattened onto a few physical lines with obfuscated identifiers: the
# conversion-map lists and loop temporaries are all assigned to the same
# placeholder names (`lowerCAmelCase`, `SCREAMING_SNAKE_CASE_`), so names such
# as `unet_conversion_map`, `unet_conversion_map_resnet`,
# `unet_conversion_map_layer`, `vae_conversion_map`, `vae_conversion_map_attn`,
# `textenc_conversion_lst`, `mapping`, and the per-prefix f-string variables
# are read but never bound. Left byte-identical — restore the identifiers from
# the upstream diffusers conversion script before attempting to run this.
import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# lowerCAmelCase : Optional[int] = [ # (stable-diffusion, HF Diffusers) ("""time_embed.0.weight""", """time_embedding.linear_1.weight"""), ("""time_embed.0.bias""", """time_embedding.linear_1.bias"""), ("""time_embed.2.weight""", """time_embedding.linear_2.weight"""), ("""time_embed.2.bias""", """time_embedding.linear_2.bias"""), ("""input_blocks.0.0.weight""", """conv_in.weight"""), ("""input_blocks.0.0.bias""", """conv_in.bias"""), ("""out.0.weight""", """conv_norm_out.weight"""), ("""out.0.bias""", """conv_norm_out.bias"""), ("""out.2.weight""", """conv_out.weight"""), ("""out.2.bias""", """conv_out.bias"""), ] lowerCAmelCase : str = [ # (stable-diffusion, HF Diffusers) ("""in_layers.0""", """norm1"""), ("""in_layers.2""", """conv1"""), ("""out_layers.0""", """norm2"""), ("""out_layers.3""", """conv2"""), ("""emb_layers.1""", """time_emb_proj"""), ("""skip_connection""", """conv_shortcut"""), ] lowerCAmelCase : List[str] = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. 
for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks lowerCAmelCase : int = f'''down_blocks.{i}.resnets.{j}.''' lowerCAmelCase : List[str] = f'''input_blocks.{3*i + j + 1}.0.''' unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 lowerCAmelCase : Any = f'''down_blocks.{i}.attentions.{j}.''' lowerCAmelCase : List[Any] = f'''input_blocks.{3*i + j + 1}.1.''' unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks lowerCAmelCase : Any = f'''up_blocks.{i}.resnets.{j}.''' lowerCAmelCase : str = f'''output_blocks.{3*i + j}.0.''' unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 lowerCAmelCase : List[Any] = f'''up_blocks.{i}.attentions.{j}.''' lowerCAmelCase : str = f'''output_blocks.{3*i + j}.1.''' unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 lowerCAmelCase : Any = f'''down_blocks.{i}.downsamplers.0.conv.''' lowerCAmelCase : Tuple = f'''input_blocks.{3*(i+1)}.0.op.''' unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 lowerCAmelCase : Tuple = f'''up_blocks.{i}.upsamplers.0.''' lowerCAmelCase : Tuple = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.''' unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) lowerCAmelCase : Any = """mid_block.attentions.0.""" lowerCAmelCase : Dict = """middle_block.1.""" unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): lowerCAmelCase : int = f'''mid_block.resnets.{j}.''' lowerCAmelCase : Union[str, Any] = f'''middle_block.{2*j}.''' unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def A_ ( _UpperCAmelCase ): # buyer beware: this is a *brittle* 
function, # and correct output requires that all of these pieces interact in # the exact order in which I have arranged them. SCREAMING_SNAKE_CASE_: Dict = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: SCREAMING_SNAKE_CASE_: Optional[int] = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: SCREAMING_SNAKE_CASE_: Any = v.replace(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: str = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: SCREAMING_SNAKE_CASE_: Optional[Any] = v.replace(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Optional[int] = v SCREAMING_SNAKE_CASE_: Optional[Any] = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# lowerCAmelCase : Union[str, Any] = [ # (stable-diffusion, HF Diffusers) ("""nin_shortcut""", """conv_shortcut"""), ("""norm_out""", """conv_norm_out"""), ("""mid.attn_1.""", """mid_block.attentions.0."""), ] for i in range(4): # down_blocks have two resnets for j in range(2): lowerCAmelCase : Union[str, Any] = f'''encoder.down_blocks.{i}.resnets.{j}.''' lowerCAmelCase : Optional[Any] = f'''encoder.down.{i}.block.{j}.''' vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: lowerCAmelCase : Dict = f'''down_blocks.{i}.downsamplers.0.''' lowerCAmelCase : List[str] = f'''down.{i}.downsample.''' vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) lowerCAmelCase : List[str] = f'''up_blocks.{i}.upsamplers.0.''' lowerCAmelCase : int = f'''up.{3-i}.upsample.''' vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): lowerCAmelCase : Any = f'''decoder.up_blocks.{i}.resnets.{j}.''' lowerCAmelCase : int = f'''decoder.up.{3-i}.block.{j}.''' 
vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): lowerCAmelCase : str = f'''mid_block.resnets.{i}.''' lowerCAmelCase : Tuple = f'''mid.block_{i+1}.''' vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) lowerCAmelCase : List[Any] = [ # (stable-diffusion, HF Diffusers) ("""norm.""", """group_norm."""), ("""q.""", """query."""), ("""k.""", """key."""), ("""v.""", """value."""), ("""proj_out.""", """proj_attn."""), ] def A_ ( _UpperCAmelCase ): # convert HF linear weights to SD conv2d weights return w.reshape(*w.shape , 1 , 1 ) def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[Any] = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: SCREAMING_SNAKE_CASE_: Union[str, Any] = v.replace(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Union[str, Any] = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: SCREAMING_SNAKE_CASE_: Any = v.replace(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] = v SCREAMING_SNAKE_CASE_: Tuple = {v: vae_state_dict[k] for k, v in mapping.items()} SCREAMING_SNAKE_CASE_: Union[str, Any] = ["q", "k", "v", "proj_out"] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if f"mid.attn_1.{weight_name}.weight" in k: print(f"Reshaping {k} for SD format" ) SCREAMING_SNAKE_CASE_: List[str] = reshape_weight_for_sd(_UpperCAmelCase ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# lowerCAmelCase : Optional[Any] = [ # (stable-diffusion, HF Diffusers) ("""resblocks.""", """text_model.encoder.layers."""), ("""ln_1""", """layer_norm1"""), ("""ln_2""", """layer_norm2"""), (""".c_fc.""", """.fc1."""), (""".c_proj.""", """.fc2."""), (""".attn""", """.self_attn"""), ("""ln_final.""", 
"""transformer.text_model.final_layer_norm."""), ("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""), ("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""), ] lowerCAmelCase : Optional[Any] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} lowerCAmelCase : Optional[int] = re.compile("""|""".join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp lowerCAmelCase : str = {"""q""": 0, """k""": 1, """v""": 2} def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = {} SCREAMING_SNAKE_CASE_: str = {} SCREAMING_SNAKE_CASE_: List[str] = {} for k, v in text_enc_dict.items(): if ( k.endswith(".self_attn.q_proj.weight" ) or k.endswith(".self_attn.k_proj.weight" ) or k.endswith(".self_attn.v_proj.weight" ) ): SCREAMING_SNAKE_CASE_: str = k[: -len(".q_proj.weight" )] SCREAMING_SNAKE_CASE_: Dict = k[-len("q_proj.weight" )] if k_pre not in capture_qkv_weight: SCREAMING_SNAKE_CASE_: Tuple = [None, None, None] SCREAMING_SNAKE_CASE_: Union[str, Any] = v continue if ( k.endswith(".self_attn.q_proj.bias" ) or k.endswith(".self_attn.k_proj.bias" ) or k.endswith(".self_attn.v_proj.bias" ) ): SCREAMING_SNAKE_CASE_: Union[str, Any] = k[: -len(".q_proj.bias" )] SCREAMING_SNAKE_CASE_: Any = k[-len("q_proj.bias" )] if k_pre not in capture_qkv_bias: SCREAMING_SNAKE_CASE_: List[Any] = [None, None, None] SCREAMING_SNAKE_CASE_: List[str] = v continue SCREAMING_SNAKE_CASE_: int = textenc_pattern.sub(lambda _UpperCAmelCase : protected[re.escape(m.group(0 ) )] , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Dict = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) SCREAMING_SNAKE_CASE_: str = textenc_pattern.sub(lambda _UpperCAmelCase : protected[re.escape(m.group(0 ) )] , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: int = 
torch.cat(_UpperCAmelCase ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) SCREAMING_SNAKE_CASE_: Optional[int] = textenc_pattern.sub(lambda _UpperCAmelCase : protected[re.escape(m.group(0 ) )] , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[Any] = torch.cat(_UpperCAmelCase ) return new_state_dict def A_ ( _UpperCAmelCase ): return text_enc_dict if __name__ == "__main__": lowerCAmelCase : int = argparse.ArgumentParser() parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""") parser.add_argument( """--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt.""" ) lowerCAmelCase : Optional[Any] = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" 
# Path for safetensors lowerCAmelCase : int = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""") lowerCAmelCase : List[str] = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""") lowerCAmelCase : Optional[int] = osp.join(args.model_path, """text_encoder""", """model.safetensors""") # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): lowerCAmelCase : Optional[int] = load_file(unet_path, device="""cpu""") else: lowerCAmelCase : Union[str, Any] = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""") lowerCAmelCase : Optional[Any] = torch.load(unet_path, map_location="""cpu""") if osp.exists(vae_path): lowerCAmelCase : str = load_file(vae_path, device="""cpu""") else: lowerCAmelCase : List[Any] = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""") lowerCAmelCase : Optional[Any] = torch.load(vae_path, map_location="""cpu""") if osp.exists(text_enc_path): lowerCAmelCase : List[Any] = load_file(text_enc_path, device="""cpu""") else: lowerCAmelCase : List[Any] = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""") lowerCAmelCase : Optional[Any] = torch.load(text_enc_path, map_location="""cpu""") # Convert the UNet model lowerCAmelCase : int = convert_unet_state_dict(unet_state_dict) lowerCAmelCase : Optional[int] = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()} # Convert the VAE model lowerCAmelCase : Union[str, Any] = convert_vae_state_dict(vae_state_dict) lowerCAmelCase : Optional[int] = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper lowerCAmelCase : Any = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm lowerCAmelCase : Any = {"""transformer.""" + 
k: v for k, v in text_enc_dict.items()} lowerCAmelCase : str = convert_text_enc_state_dict_vaa(text_enc_dict) lowerCAmelCase : Dict = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()} else: lowerCAmelCase : Any = convert_text_enc_state_dict(text_enc_dict) lowerCAmelCase : Optional[Any] = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint lowerCAmelCase : Union[str, Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: lowerCAmelCase : str = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: lowerCAmelCase : int = {"""state_dict""": state_dict} torch.save(state_dict, args.checkpoint_path)
671
1
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
logger = lowerCAmelCase


class UperNetConfig(PretrainedConfig):
    """Configuration for a UPerNet semantic-segmentation model.

    Holds the backbone sub-configuration plus the decode-head and auxiliary-head
    hyper-parameters. All arguments are optional; defaults follow the original
    UPerNet + ResNet setup.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,  # backbone config object or its dict form; defaults to ResNet
        hidden_size=512,  # channel count of the decode head
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],  # PSP-module pooling scales (kept as list to match serialized configs)
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,  # label value ignored by the segmentation loss
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Re-hydrate a plain dict (e.g. from a saved config) into the proper config class.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize this config — including the nested backbone config — to a plain dict."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
671
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    """Configuration for XLM-ProphetNet encoder-decoder models.

    Stores the sizes of the encoder/decoder stacks, dropout rates and the
    ProphetNet-specific n-gram prediction parameters.
    """

    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        # Total depth is the sum of both stacks.
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
671
1
def A_(a: str, b: str) -> bool:
    """Return True if `b` can be obtained from `a` by capitalizing some of `a`'s
    lowercase letters and deleting every remaining lowercase letter (the classic
    "abbreviation" dynamic-programming problem).

    >>> A_("daBcd", "ABC")
    True
    >>> A_("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    # dp[i][j]: can a[:i] be transformed into b[:j]?
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Capitalize a[i] to match b[j] (also covers a[i] already uppercase).
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Delete a[i]; only lowercase letters may be deleted.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    """Pairwise squared Euclidean distances between the rows of `a` and of `b`.

    Uses the expansion ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2 to stay vectorized.
    """
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    """Map each RGB pixel in `x` to the index of its nearest color cluster."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class ImageGPTImageProcessor(BaseImageProcessor):
    """Image processor for ImageGPT.

    Optionally resizes, normalizes pixel values to [-1, 1], and color-quantizes
    images into flat sequences of palette indices ("pixel tokens") returned under
    the key `input_ids`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,  # color palette used for quantization
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `size` (dict with "height" and "width" keys)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        # `resize` here is the module-level transform, not this method.
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Scale pixel values from [0, 255] to [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> "BatchFeature":
        """Preprocess a batch of images; per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
671
1
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class JsonDatasetReader(AbstractDatasetReader):
    """Reads a dataset from one or more JSON files via the packaged `Json` builder."""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,  # JSON field holding the records, if nested
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        # Normalize to a {split: paths} mapping expected by the builder.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset (streaming or map-style, per `self.streaming`)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    """Writes a `Dataset` to JSON (lines or other pandas orients), optionally in parallel."""

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        """Write the whole dataset; returns the number of bytes written."""
        # `path_or_buf` must not leak into pandas' to_json kwargs.
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args) -> bytes:
        """Serialize one batch of rows to encoded JSON bytes (picklable for Pool.imap)."""
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        """Stream batches to `file_obj`, serially or with a multiprocessing pool."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
671
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizer(BertTokenizer):
    """BERT tokenizer preconfigured for the DPR context encoder checkpoints."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    """BERT tokenizer preconfigured for the DPR question encoder checkpoints."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
            required by one of the truncation/padding parameters. If the model has no specific maximum input length
            (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    """Mixin adding DPR-reader-specific encoding and best-span decoding on top of a tokenizer."""

    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        # With no passages at all, fall back to plain tokenizer behavior.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            # Only one of titles/texts given: encode it as the second sequence.
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # One question may be shared across all passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Decode the top answer spans from reader logits, most relevant passages first."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ):
        """Greedily pick the highest-scoring non-overlapping spans within one passage."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            # Skip spans overlapping an already-chosen (higher-scoring) span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    """BERT tokenizer preconfigured for the DPR reader checkpoints."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
671
1
import math def A_ ( _UpperCAmelCase ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A_ ( _UpperCAmelCase = 0.1 ): SCREAMING_SNAKE_CASE_: Union[str, Any] = 3 SCREAMING_SNAKE_CASE_: Optional[int] = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ): primes += is_prime(_UpperCAmelCase ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
671
from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class __lowercase ( UpperCAmelCase_ ): """simple docstring""" _UpperCAmelCase : Optional[Any] = DistilBertTokenizer _UpperCAmelCase : Union[str, Any] = DistilBertTokenizerFast _UpperCAmelCase : int = True @slow def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: Optional[Any] = DistilBertTokenizer.from_pretrained("distilbert-base-uncased") SCREAMING_SNAKE_CASE_: Any = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
671
1
def A_ ( _UpperCAmelCase ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError("multiplicative_persistence() only accepts integral values" ) if num < 0: raise ValueError("multiplicative_persistence() does not accept negative values" ) SCREAMING_SNAKE_CASE_: List[Any] = 0 SCREAMING_SNAKE_CASE_: str = str(_UpperCAmelCase ) while len(_UpperCAmelCase ) != 1: SCREAMING_SNAKE_CASE_: Optional[int] = [int(_UpperCAmelCase ) for i in num_string] SCREAMING_SNAKE_CASE_: Any = 1 for i in range(0 , len(_UpperCAmelCase ) ): total *= numbers[i] SCREAMING_SNAKE_CASE_: Any = str(_UpperCAmelCase ) steps += 1 return steps def A_ ( _UpperCAmelCase ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError("additive_persistence() only accepts integral values" ) if num < 0: raise ValueError("additive_persistence() does not accept negative values" ) SCREAMING_SNAKE_CASE_: Union[str, Any] = 0 SCREAMING_SNAKE_CASE_: Tuple = str(_UpperCAmelCase ) while len(_UpperCAmelCase ) != 1: SCREAMING_SNAKE_CASE_: Optional[Any] = [int(_UpperCAmelCase ) for i in num_string] SCREAMING_SNAKE_CASE_: Optional[int] = 0 for i in range(0 , len(_UpperCAmelCase ) ): total += numbers[i] SCREAMING_SNAKE_CASE_: Optional[int] = str(_UpperCAmelCase ) steps += 1 return steps if __name__ == "__main__": import doctest doctest.testmod()
671
import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient lowerCAmelCase : List[Any] = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""]) def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = test_results.split(" " ) SCREAMING_SNAKE_CASE_: Tuple = 0 SCREAMING_SNAKE_CASE_: str = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. SCREAMING_SNAKE_CASE_: Optional[Any] = expressions[-2] if "=" in expressions[-1] else expressions[-1] for i, expression in enumerate(_UpperCAmelCase ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = {} SCREAMING_SNAKE_CASE_: Any = None SCREAMING_SNAKE_CASE_: Union[str, Any] = False for line in failures_short_lines.split("\n" ): if re.search(R"_ \[doctest\]" , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[Any] = True SCREAMING_SNAKE_CASE_: Dict = line.split(" " )[2] elif in_error and not line.split(" " )[0].isdigit(): SCREAMING_SNAKE_CASE_: Union[str, Any] = line SCREAMING_SNAKE_CASE_: List[str] = False return failures class __lowercase : """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict): SCREAMING_SNAKE_CASE_: Dict = title SCREAMING_SNAKE_CASE_: int = doc_test_results["time_spent"].split(",")[0] SCREAMING_SNAKE_CASE_: int = doc_test_results["success"] SCREAMING_SNAKE_CASE_: Optional[Any] = doc_test_results["failures"] SCREAMING_SNAKE_CASE_: Any = self.n_success + self.n_failures # Failures and success of the modeling tests SCREAMING_SNAKE_CASE_: Optional[int] = doc_test_results @property def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: int = [self._time_spent] SCREAMING_SNAKE_CASE_: 
List[Any] = 0 for time in time_spent: SCREAMING_SNAKE_CASE_: Union[str, Any] = time.split(":") # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(lowerCAmelCase__) == 1: SCREAMING_SNAKE_CASE_: Dict = [0, 0, time_parts[0]] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = int(time_parts[0]), int(time_parts[1]), float(time_parts[2]) total_secs += hours * 3600 + minutes * 60 + seconds SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return F"{int(lowerCAmelCase__)}h{int(lowerCAmelCase__)}m{int(lowerCAmelCase__)}s" @property def _SCREAMING_SNAKE_CASE ( self : List[Any]): return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return { "type": "section", "text": { "type": "plain_text", "text": F"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.", "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return { "type": "section", "text": { "type": "plain_text", "text": ( F"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in" F" {self.time}." 
), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: Optional[Any] = 40 SCREAMING_SNAKE_CASE_: List[str] = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(lowerCAmelCase__ , lowerCAmelCase__)} SCREAMING_SNAKE_CASE_: Tuple = "" for category, failures in category_failures.items(): if len(lowerCAmelCase__) == 0: continue if report != "": report += "\n\n" report += F"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n" report += "`" report += "`\n`".join(lowerCAmelCase__) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": F"The following examples had failures:\n\n\n{report}\n", }, } @property def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_: Optional[Any] = [self.header] if self.n_failures > 0: blocks.append(self.failures) if self.n_failures > 0: blocks.extend([self.category_failures]) if self.n_failures == 0: blocks.append(self.no_failures) return json.dumps(lowerCAmelCase__) @staticmethod def _SCREAMING_SNAKE_CASE ( ): SCREAMING_SNAKE_CASE_: List[str] = [ { "type": "section", "text": { "type": "plain_text", "text": "There was an issue running the tests.", }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } ] print("Sending the following payload") print(json.dumps({"blocks": json.loads(lowerCAmelCase__)})) client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." 
, blocks=lowerCAmelCase__ , ) def _SCREAMING_SNAKE_CASE ( self : Tuple): print("Sending the following payload") print(json.dumps({"blocks": json.loads(self.payload)})) SCREAMING_SNAKE_CASE_: Optional[Any] = F"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed." SCREAMING_SNAKE_CASE_: List[Any] = client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=lowerCAmelCase__ , ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any]): SCREAMING_SNAKE_CASE_: Dict = "" for key, value in failures.items(): SCREAMING_SNAKE_CASE_: str = value[:200] + " [Truncated]" if len(lowerCAmelCase__) > 250 else value failures_text += F"*{key}*\n_{value}_\n\n" SCREAMING_SNAKE_CASE_: Any = job_name SCREAMING_SNAKE_CASE_: List[Any] = {"type": "section", "text": {"type": "mrkdwn", "text": text}} if job_link is not None: SCREAMING_SNAKE_CASE_: Tuple = { "type": "button", "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True}, "url": job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def _SCREAMING_SNAKE_CASE ( self : Any): if self.thread_ts is None: raise ValueError("Can only post reply if a post has been made.") SCREAMING_SNAKE_CASE_: Tuple = self.doc_test_results.pop("job_link") self.doc_test_results.pop("failures") self.doc_test_results.pop("success") self.doc_test_results.pop("time_spent") SCREAMING_SNAKE_CASE_: Any = sorted(self.doc_test_results.items() , key=lambda lowerCAmelCase__: t[0]) for job, job_result in sorted_dict: if len(job_result["failures"]): SCREAMING_SNAKE_CASE_: Union[str, Any] = F"*Num failures* :{len(job_result['failed'])} \n" SCREAMING_SNAKE_CASE_: Optional[Any] = job_result["failures"] 
SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_reply_blocks(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , text=lowerCAmelCase__) print("Sending the following reply") print(json.dumps({"blocks": blocks})) client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F"Results for {job}" , blocks=lowerCAmelCase__ , thread_ts=self.thread_ts["ts"] , ) time.sleep(1) def A_ ( ): SCREAMING_SNAKE_CASE_: Tuple = os.environ["GITHUB_RUN_ID"] SCREAMING_SNAKE_CASE_: Any = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100" SCREAMING_SNAKE_CASE_: List[Any] = requests.get(_UpperCAmelCase ).json() SCREAMING_SNAKE_CASE_: Optional[Any] = {} try: jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} ) SCREAMING_SNAKE_CASE_: Any = math.ceil((result["total_count"] - 1_00) / 1_00 ) for i in range(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = requests.get(url + f"&page={i + 2}" ).json() jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} ) return jobs except Exception as e: print("Unknown error, could not fetch links." , _UpperCAmelCase ) return {} def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[Any] = {} if os.path.exists(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[str] = os.listdir(_UpperCAmelCase ) for file in files: try: with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE_: Dict = f.read() except UnicodeDecodeError as e: raise ValueError(f"Could not open {os.path.join(_UpperCAmelCase , _UpperCAmelCase )}." 
) from e return _artifact def A_ ( ): class __lowercase : """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : str): SCREAMING_SNAKE_CASE_: Dict = name SCREAMING_SNAKE_CASE_: List[str] = [] def __str__( self : Optional[Any]): return self.name def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : str): self.paths.append({"name": self.name, "path": path}) SCREAMING_SNAKE_CASE_: Dict[str, Artifact] = {} SCREAMING_SNAKE_CASE_: List[Any] = filter(os.path.isdir , os.listdir() ) for directory in directories: SCREAMING_SNAKE_CASE_: Dict = directory if artifact_name not in _available_artifacts: SCREAMING_SNAKE_CASE_: Tuple = Artifact(_UpperCAmelCase ) _available_artifacts[artifact_name].add_path(_UpperCAmelCase ) return _available_artifacts if __name__ == "__main__": lowerCAmelCase : Tuple = get_job_links() lowerCAmelCase : Optional[Any] = retrieve_available_artifacts() lowerCAmelCase : Any = collections.OrderedDict( [ ("""*.py""", """API Examples"""), ("""*.md""", """MD Examples"""), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' lowerCAmelCase : int = { v: { """failed""": [], """failures""": {}, } for v in docs.values() } # Link to the GitHub Action job lowerCAmelCase : Optional[int] = github_actions_job_links.get("""run_doctests""") lowerCAmelCase : List[Any] = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0] lowerCAmelCase : Any = retrieve_artifact(artifact_path["""name"""]) if "stats" in artifact: lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = handle_test_results(artifact["""stats"""]) lowerCAmelCase : List[str] = failed lowerCAmelCase : Any = success lowerCAmelCase : Dict = time_spent[1:-1] + """, """ lowerCAmelCase : str = extract_first_line_failure(artifact["""failures_short"""]) for line in artifact["summary_short"].split("""\n"""): if re.search("""FAILED""", line): 
lowerCAmelCase : Tuple = line.replace("""FAILED """, """""") lowerCAmelCase : str = line.split()[0].replace("""\n""", """""") if "::" in line: lowerCAmelCase , lowerCAmelCase : Optional[int] = line.split("""::""") else: lowerCAmelCase , lowerCAmelCase : str = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): lowerCAmelCase : str = docs[file_regex] doc_test_results[category]["failed"].append(test) lowerCAmelCase : str = all_failures[test] if test in all_failures else """N/A""" lowerCAmelCase : Any = failure break lowerCAmelCase : Union[str, Any] = Message("""🤗 Results of the doc tests.""", doc_test_results) message.post() message.post_reply()
671
1
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowerCAmelCase : Dict = """hf-internal-testing/tiny-random-bert""" lowerCAmelCase : List[Any] = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""") lowerCAmelCase : Any = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6""" class __lowercase ( unittest.TestCase ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : List[Any]): SCREAMING_SNAKE_CASE_: List[str] = cached_file(lowerCAmelCase__ , lowerCAmelCase__) # Should have downloaded the file in here self.assertTrue(os.path.isdir(lowerCAmelCase__)) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(lowerCAmelCase__ , lowerCAmelCase__))) with open(os.path.join(lowerCAmelCase__ , "refs" , "main")) as f: SCREAMING_SNAKE_CASE_: int = f.read() self.assertEqual(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , "snapshots" , lowerCAmelCase__ , lowerCAmelCase__)) self.assertTrue(os.path.isfile(lowerCAmelCase__)) # File is cached at the same place the second time. SCREAMING_SNAKE_CASE_: List[str] = cached_file(lowerCAmelCase__ , lowerCAmelCase__) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__) # Using a specific revision to test the full commit hash. 
SCREAMING_SNAKE_CASE_: Tuple = cached_file(lowerCAmelCase__ , lowerCAmelCase__ , revision="9b8c223") self.assertEqual(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , "snapshots" , lowerCAmelCase__ , lowerCAmelCase__)) def _SCREAMING_SNAKE_CASE ( self : Any): with self.assertRaisesRegex(lowerCAmelCase__ , "is not a valid model identifier"): SCREAMING_SNAKE_CASE_: Optional[Any] = cached_file("tiny-random-bert" , lowerCAmelCase__) with self.assertRaisesRegex(lowerCAmelCase__ , "is not a valid git identifier"): SCREAMING_SNAKE_CASE_: Optional[Any] = cached_file(lowerCAmelCase__ , lowerCAmelCase__ , revision="aaaa") with self.assertRaisesRegex(lowerCAmelCase__ , "does not appear to have a file named"): SCREAMING_SNAKE_CASE_: str = cached_file(lowerCAmelCase__ , "conf") def _SCREAMING_SNAKE_CASE ( self : Dict): with self.assertRaisesRegex(lowerCAmelCase__ , "does not appear to have a file named"): SCREAMING_SNAKE_CASE_: Any = cached_file(lowerCAmelCase__ , "conf") with open(os.path.join(lowerCAmelCase__ , "refs" , "main")) as f: SCREAMING_SNAKE_CASE_: Dict = f.read() self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase__ , ".no_exist" , lowerCAmelCase__ , "conf"))) SCREAMING_SNAKE_CASE_: Optional[int] = cached_file(lowerCAmelCase__ , "conf" , _raise_exceptions_for_missing_entries=lowerCAmelCase__) self.assertIsNone(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Any = cached_file(lowerCAmelCase__ , "conf" , local_files_only=lowerCAmelCase__ , _raise_exceptions_for_missing_entries=lowerCAmelCase__) self.assertIsNone(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = mock.Mock() SCREAMING_SNAKE_CASE_: int = 500 SCREAMING_SNAKE_CASE_: str = {} SCREAMING_SNAKE_CASE_: Optional[int] = HTTPError SCREAMING_SNAKE_CASE_: Tuple = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch("requests.Session.request" , return_value=lowerCAmelCase__) as mock_head: SCREAMING_SNAKE_CASE_: Dict = cached_file(lowerCAmelCase__ , "conf" , _raise_exceptions_for_connection_errors=lowerCAmelCase__) self.assertIsNone(lowerCAmelCase__) # This check we did call the fake head request mock_head.assert_called() def _SCREAMING_SNAKE_CASE ( self : str): self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCAmelCase__)) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCAmelCase__)) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCAmelCase__)) def _SCREAMING_SNAKE_CASE ( self : Tuple): # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt")) # The function raises if the repository does not exist. with self.assertRaisesRegex(lowerCAmelCase__ , "is not a valid model identifier"): get_file_from_repo("bert-base-case" , lowerCAmelCase__) # The function raises if the revision does not exist. with self.assertRaisesRegex(lowerCAmelCase__ , "is not a valid git identifier"): get_file_from_repo("bert-base-cased" , lowerCAmelCase__ , revision="ahaha") SCREAMING_SNAKE_CASE_: str = get_file_from_repo("bert-base-cased" , lowerCAmelCase__) # The name is the cached name which is not very easy to test, so instead we load the content. SCREAMING_SNAKE_CASE_: Dict = json.loads(open(lowerCAmelCase__ , "r").read()) self.assertEqual(config["hidden_size"] , 768) def _SCREAMING_SNAKE_CASE ( self : List[str]): with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE_: Optional[Any] = Path(lowerCAmelCase__) / "a.txt" filename.touch() self.assertEqual(get_file_from_repo(lowerCAmelCase__ , "a.txt") , str(lowerCAmelCase__)) self.assertIsNone(get_file_from_repo(lowerCAmelCase__ , "b.txt"))
671
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase : str = 16 lowerCAmelCase : List[Any] = 32 def A_ ( _UpperCAmelCase , _UpperCAmelCase = 16 ): SCREAMING_SNAKE_CASE_: List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" ) SCREAMING_SNAKE_CASE_: Tuple = load_dataset("glue" , "mrpc" ) def tokenize_function(_UpperCAmelCase ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE_: List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE_: str = datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE_: 
Optional[Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(_UpperCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE_: List[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE_: Tuple = 16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE_: int = 8 else: SCREAMING_SNAKE_CASE_: Any = None return tokenizer.pad( _UpperCAmelCase , padding="longest" , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors="pt" , ) # Instantiate dataloaders. SCREAMING_SNAKE_CASE_: Optional[Any] = DataLoader( tokenized_datasets["train"] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Tuple = DataLoader( tokenized_datasets["validation"] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCAmelCase : Optional[int] = mocked_dataloaders # noqa: F811 def A_ ( _UpperCAmelCase , _UpperCAmelCase ): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS" , _UpperCAmelCase ) == "1": SCREAMING_SNAKE_CASE_: Tuple = 2 # New Code # SCREAMING_SNAKE_CASE_: List[str] = int(args.gradient_accumulation_steps ) # Initialize accelerator SCREAMING_SNAKE_CASE_: int = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_UpperCAmelCase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. 
Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE_: Tuple = config["lr"] SCREAMING_SNAKE_CASE_: List[str] = int(config["num_epochs"] ) SCREAMING_SNAKE_CASE_: List[str] = int(config["seed"] ) SCREAMING_SNAKE_CASE_: Optional[int] = int(config["batch_size"] ) SCREAMING_SNAKE_CASE_: str = evaluate.load("glue" , "mrpc" ) set_seed(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE_: Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_UpperCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE_: List[Any] = model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE_: Union[str, Any] = AdamW(params=model.parameters() , lr=_UpperCAmelCase ) # Instantiate scheduler SCREAMING_SNAKE_CASE_: str = get_linear_schedule_with_warmup( optimizer=_UpperCAmelCase , num_warmup_steps=1_00 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = accelerator.prepare( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Now we train the model for epoch in range(_UpperCAmelCase ): model.train() for step, batch in enumerate(_UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[Any] = model(**_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[Any] = output.loss accelerator.backward(_UpperCAmelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE_: Optional[Any] = model(**_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[Any] = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=_UpperCAmelCase , references=_UpperCAmelCase , ) SCREAMING_SNAKE_CASE_: List[str] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:" , _UpperCAmelCase ) def A_ ( ): SCREAMING_SNAKE_CASE_: str = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." 
"and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=_UpperCAmelCase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) SCREAMING_SNAKE_CASE_: List[Any] = parser.parse_args() SCREAMING_SNAKE_CASE_: Tuple = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(_UpperCAmelCase , _UpperCAmelCase ) if __name__ == "__main__": main()
671
1
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType

########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """Build the GLUE/MRPC train and validation dataloaders.

    Args:
        accelerator: the ``Accelerator`` driving this run; used to decide
            padding strategy (TPU vs. GPU, mixed-precision multiples) and to
            serialize the `.map` preprocessing on the main process.
        batch_size (int): per-device batch size for the training dataloader.

    Returns:
        (train_dataloader, eval_dataloader) tuple of ``torch.utils.data.DataLoader``.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train and evaluate BERT on MRPC with gradient accumulation.

    Args:
        config (dict): hyper-parameters — ``lr``, ``num_epochs``, ``seed``,
            ``batch_size``.
        args: parsed CLI namespace (``mixed_precision``, ``cpu``,
            ``gradient_accumulation_steps``).
    """
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    """Parse CLI arguments and launch the training run."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
671
from math import asin, atan, cos, radians, sin, sqrt, tan

# WGS-84 ellipsoid semi-major / semi-minor axes and the Earth radius used by
# the haversine formula, all in metres.
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def A_(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Return the great-circle (haversine) distance in metres between two points.

    Latitudes are first converted to reduced latitudes on the WGS-84
    ellipsoid via ``atan((1 - flattening) * tan(phi))`` before the spherical
    haversine formula is applied.

    Args:
        lat1, lon1: first point, in decimal degrees.
        lat2, lon2: second point, in decimal degrees.

    Returns:
        Distance along the sphere of radius ``RADIUS``, in metres.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation: half-angle sines of the latitude and longitude differences
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
1
import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowercase ( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" _UpperCAmelCase : Optional[int] = AudioLDMPipeline _UpperCAmelCase : List[Any] = TEXT_TO_AUDIO_PARAMS _UpperCAmelCase : Any = TEXT_TO_AUDIO_BATCH_PARAMS _UpperCAmelCase : Optional[int] = frozenset( [ '''num_inference_steps''', '''num_waveforms_per_prompt''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def _SCREAMING_SNAKE_CASE ( self : List[Any]): torch.manual_seed(0) SCREAMING_SNAKE_CASE_: str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=(32, 64) , class_embed_type="simple_projection" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_: Dict = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_: Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , 
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0) SCREAMING_SNAKE_CASE_: Dict = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , ) SCREAMING_SNAKE_CASE_: List[Any] = ClapTextModelWithProjection(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" , model_max_length=77) SCREAMING_SNAKE_CASE_: Dict = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_: Any = SpeechTaHifiGan(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "vocoder": vocoder, } return components def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict=0): if str(lowerCAmelCase__).startswith("mps"): SCREAMING_SNAKE_CASE_: Dict = torch.manual_seed(lowerCAmelCase__) else: SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = { "prompt": "A hammer hitting a wooden surface", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs def _SCREAMING_SNAKE_CASE ( self : List[str]): SCREAMING_SNAKE_CASE_: str = "cpu" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_: int = self.get_dummy_components() SCREAMING_SNAKE_CASE_: List[str] = AudioLDMPipeline(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = audioldm_pipe.to(lowerCAmelCase__) 
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = self.get_dummy_inputs(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = audioldm_pipe(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = output.audios[0] assert audio.ndim == 1 assert len(lowerCAmelCase__) == 256 SCREAMING_SNAKE_CASE_: Dict = audio[:10] SCREAMING_SNAKE_CASE_: List[Any] = np.array( [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]) assert np.abs(audio_slice - expected_slice).max() < 1E-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int]): SCREAMING_SNAKE_CASE_: Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE_: Tuple = AudioLDMPipeline(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = audioldm_pipe.to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = audioldm_pipe.to(lowerCAmelCase__) audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = self.get_dummy_inputs(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Any = 3 * [inputs["prompt"]] # forward SCREAMING_SNAKE_CASE_: List[Any] = audioldm_pipe(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = output.audios[0] SCREAMING_SNAKE_CASE_: Any = self.get_dummy_inputs(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = 3 * [inputs.pop("prompt")] SCREAMING_SNAKE_CASE_: Tuple = audioldm_pipe.tokenizer( lowerCAmelCase__ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors="pt" , ) SCREAMING_SNAKE_CASE_: List[Any] = text_inputs["input_ids"].to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = audioldm_pipe.text_encoder( lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_: Dict = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state SCREAMING_SNAKE_CASE_: str = F.normalize(lowerCAmelCase__ , dim=-1) SCREAMING_SNAKE_CASE_: Any = prompt_embeds # forward SCREAMING_SNAKE_CASE_: Optional[Any] = 
audioldm_pipe(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = output.audios[0] assert np.abs(audio_a - audio_a).max() < 1E-2 def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): SCREAMING_SNAKE_CASE_: Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE_: Union[str, Any] = AudioLDMPipeline(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = audioldm_pipe.to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = audioldm_pipe.to(lowerCAmelCase__) audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = self.get_dummy_inputs(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = 3 * ["this is a negative prompt"] SCREAMING_SNAKE_CASE_: str = negative_prompt SCREAMING_SNAKE_CASE_: Optional[int] = 3 * [inputs["prompt"]] # forward SCREAMING_SNAKE_CASE_: Tuple = audioldm_pipe(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = output.audios[0] SCREAMING_SNAKE_CASE_: str = self.get_dummy_inputs(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = 3 * [inputs.pop("prompt")] SCREAMING_SNAKE_CASE_: Optional[Any] = [] for p in [prompt, negative_prompt]: SCREAMING_SNAKE_CASE_: List[Any] = audioldm_pipe.tokenizer( lowerCAmelCase__ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors="pt" , ) SCREAMING_SNAKE_CASE_: Dict = text_inputs["input_ids"].to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = audioldm_pipe.text_encoder( lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_: Any = text_embeds.text_embeds # additional L_2 normalization over each hidden-state SCREAMING_SNAKE_CASE_: Optional[Any] = F.normalize(lowerCAmelCase__ , dim=-1) embeds.append(lowerCAmelCase__) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = embeds # forward SCREAMING_SNAKE_CASE_: List[Any] = audioldm_pipe(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = output.audios[0] assert np.abs(audio_a - audio_a).max() < 1E-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int]): 
SCREAMING_SNAKE_CASE_: Any = "cpu" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_: Any = self.get_dummy_components() SCREAMING_SNAKE_CASE_: Dict = PNDMScheduler(skip_prk_steps=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = AudioLDMPipeline(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Any = audioldm_pipe.to(lowerCAmelCase__) audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = self.get_dummy_inputs(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = "egg cracking" SCREAMING_SNAKE_CASE_: Dict = audioldm_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Any = output.audios[0] assert audio.ndim == 1 assert len(lowerCAmelCase__) == 256 SCREAMING_SNAKE_CASE_: Optional[int] = audio[:10] SCREAMING_SNAKE_CASE_: List[str] = np.array( [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]) assert np.abs(audio_slice - expected_slice).max() < 1E-2 def _SCREAMING_SNAKE_CASE ( self : Dict): SCREAMING_SNAKE_CASE_: Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_: Union[str, Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE_: Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = AudioLDMPipeline(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = audioldm_pipe.to(lowerCAmelCase__) audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) SCREAMING_SNAKE_CASE_: str = audioldm_pipe(lowerCAmelCase__ , num_inference_steps=2).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts SCREAMING_SNAKE_CASE_: Tuple = 2 SCREAMING_SNAKE_CASE_: Tuple = audioldm_pipe([prompt] * batch_size , num_inference_steps=2).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single 
prompt SCREAMING_SNAKE_CASE_: str = 2 SCREAMING_SNAKE_CASE_: Union[str, Any] = audioldm_pipe(lowerCAmelCase__ , num_inference_steps=2 , num_waveforms_per_prompt=lowerCAmelCase__).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts SCREAMING_SNAKE_CASE_: int = 2 SCREAMING_SNAKE_CASE_: Optional[Any] = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=lowerCAmelCase__).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): SCREAMING_SNAKE_CASE_: Any = "cpu" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_: List[str] = self.get_dummy_components() SCREAMING_SNAKE_CASE_: int = AudioLDMPipeline(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = audioldm_pipe.to(lowerCAmelCase__) audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate SCREAMING_SNAKE_CASE_: List[Any] = self.get_dummy_inputs(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = audioldm_pipe(audio_length_in_s=0.016 , **lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = output.audios[0] assert audio.ndim == 1 assert len(lowerCAmelCase__) / vocoder_sampling_rate == 0.016 SCREAMING_SNAKE_CASE_: Union[str, Any] = audioldm_pipe(audio_length_in_s=0.032 , **lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = output.audios[0] assert audio.ndim == 1 assert len(lowerCAmelCase__) / vocoder_sampling_rate == 0.032 def _SCREAMING_SNAKE_CASE ( self : int): SCREAMING_SNAKE_CASE_: Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE_: Union[str, Any] = AudioLDMPipeline(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = audioldm_pipe.to(lowerCAmelCase__) audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = ["hey"] SCREAMING_SNAKE_CASE_: Tuple = audioldm_pipe(lowerCAmelCase__ , 
num_inference_steps=1) SCREAMING_SNAKE_CASE_: List[str] = output.audios.shape assert audio_shape == (1, 256) SCREAMING_SNAKE_CASE_: str = audioldm_pipe.vocoder.config config.model_in_dim *= 2 SCREAMING_SNAKE_CASE_: Optional[int] = SpeechTaHifiGan(lowerCAmelCase__).to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = audioldm_pipe(lowerCAmelCase__ , num_inference_steps=1) SCREAMING_SNAKE_CASE_: Any = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def _SCREAMING_SNAKE_CASE ( self : Any): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): self._test_inference_batch_single_identical(test_mean_pixel_difference=lowerCAmelCase__) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _SCREAMING_SNAKE_CASE ( self : List[str]): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase__) @slow class __lowercase ( unittest.TestCase ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Tuple): super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int="cpu" , lowerCAmelCase__ : List[Any]=torch.floataa , lowerCAmelCase__ : str=0): SCREAMING_SNAKE_CASE_: Tuple = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = np.random.RandomState(lowerCAmelCase__).standard_normal((1, 8, 128, 16)) SCREAMING_SNAKE_CASE_: Optional[int] = torch.from_numpy(lowerCAmelCase__).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def 
_SCREAMING_SNAKE_CASE ( self : int): SCREAMING_SNAKE_CASE_: Optional[Any] = AudioLDMPipeline.from_pretrained("cvssp/audioldm") SCREAMING_SNAKE_CASE_: List[Any] = audioldm_pipe.to(lowerCAmelCase__) audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = self.get_inputs(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = 25 SCREAMING_SNAKE_CASE_: str = audioldm_pipe(**lowerCAmelCase__).audios[0] assert audio.ndim == 1 assert len(lowerCAmelCase__) == 8_1920 SCREAMING_SNAKE_CASE_: Union[str, Any] = audio[7_7230:7_7240] SCREAMING_SNAKE_CASE_: Any = np.array( [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]) SCREAMING_SNAKE_CASE_: Union[str, Any] = np.abs(expected_slice - audio_slice).max() assert max_diff < 1E-2 def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: Any = AudioLDMPipeline.from_pretrained("cvssp/audioldm") SCREAMING_SNAKE_CASE_: int = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config) SCREAMING_SNAKE_CASE_: Union[str, Any] = audioldm_pipe.to(lowerCAmelCase__) audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = self.get_inputs(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = audioldm_pipe(**lowerCAmelCase__).audios[0] assert audio.ndim == 1 assert len(lowerCAmelCase__) == 8_1920 SCREAMING_SNAKE_CASE_: Union[str, Any] = audio[2_7780:2_7790] SCREAMING_SNAKE_CASE_: Tuple = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212]) SCREAMING_SNAKE_CASE_: str = np.abs(expected_slice - audio_slice).max() assert max_diff < 3E-2
671
import argparse

import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint into a PyTorch state dict on disk.

    Args:
        tf_checkpoint_path (str): path to the TensorFlow checkpoint.
        bert_config_file (str): JSON config describing the pre-trained model
            architecture.
        pytorch_dump_path (str): where to write the converted ``state_dict``.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
671
1
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Parse the launcher's own CLI arguments.

    Returns:
        ``argparse.Namespace`` with ``num_cores``, ``training_script`` and the
        remaining ``training_script_args`` forwarded verbatim to the script.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    """Import the target training script and spawn it on the TPU cores."""
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the spawned script sees its own arguments plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
671
import math


def is_prime(number: int) -> bool:
    """Return True iff *number* is prime, by 6k ± 1 trial division.

    Handles negatives, 0 and 1 (all non-prime) explicitly.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def A_(ratio: float = 0.1) -> int:
    """Project Euler 58: smallest square-spiral side length for which the
    fraction of primes on both diagonals first drops below *ratio*.

    The spiral of side ``j`` has ``2*j - 1`` diagonal values; each iteration
    adds the three new non-square corner values of the next ring
    (``range(j*j + j + 1, (j + 2)**2, j + 1)``) and advances ``j`` by 2.

    Args:
        ratio: target prime fraction threshold (default 0.1).

    Returns:
        The side length of the square spiral.
    """
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
1
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    """Build an UperNetConfig (Swin backbone + ADE20k labels) for the given checkpoint name."""
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # JSON keys come back as strings; the config expects int ids
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config


def create_rename_keys(config):
    """Return (old_key, new_key) pairs mapping mmseg parameter names to HF names."""
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))

        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def read_in_q_k_v(state_dict, backbone_config):
    """Split each fused qkv projection into separate query/key/value tensors.

    In the original implementation the input projection is a single matrix + bias;
    HF Swin expects query, key and value as separate parameters.
    """
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on


def correct_unfold_reduction_order(x):
    """Reorder a 2D downsample-reduction weight from HF layout to mmseg layout."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    """Reorder a 2D downsample-reduction weight from mmseg layout to HF layout."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    """Reorder a 1D downsample-norm parameter from HF layout to mmseg layout."""
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    """Reorder a 1D downsample-norm parameter from mmseg layout to HF layout."""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Download an mmseg UperNet-Swin checkpoint, convert it to HF format and verify it.

    Raises AssertionError if the converted model's logits differ from the reference values.
    """
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
671
import re


def split_input(str_: str) -> list:
    """Split ``str_`` on non-alphanumeric characters, then each fragment on whitespace.

    Returns a list of word lists, one per fragment.
    """
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    """Join all words, each capitalized, with no separator (PascalCase)."""
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join words with ``separator``, fully upper- or lower-cased depending on ``upper``.

    Returns "not valid string" when the input yields no indexable words.
    """
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    """Convert ``text`` to PascalCase."""
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    """Convert ``text`` to camelCase (PascalCase with a lowercase first letter)."""
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    """Convert ``text`` to snake_case (SCREAMING_SNAKE_CASE when ``upper``)."""
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    """Convert ``text`` to kebab-case (UPPER-KEBAB-CASE when ``upper``)."""
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
671
1
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

# Maps ONNX tensor element type strings to the corresponding numpy dtypes.
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    """Thin wrapper around an ``onnxruntime.InferenceSession`` with save/load helpers."""

    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        # onnxruntime expects plain numpy arrays as inputs
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Create an ``InferenceSession`` for the model at ``path``.

        Falls back to the CPU execution provider when none is given.
        """
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        """Copy the latest model file (and external weights, if any) into ``save_directory``."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        """Save the model to a directory so it can be re-loaded with ``from_pretrained``."""
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        """Load a model either from a local directory or from the Hugging Face Hub."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        """Public entry point; supports a ``repo@revision`` suffix in ``model_id``."""
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
671
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    """Configuration for an UperNet semantic-segmentation model.

    Wraps a backbone config (defaults to ResNet) plus the decode/auxiliary
    head hyperparameters.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # allow passing a plain dict; rebuild the concrete config class from its model_type
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize this instance (and its nested backbone config) to a dict."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
671
1
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """Sort ``arr`` with the strand-sort algorithm.

    Repeatedly extracts an increasing (or decreasing, when ``reverse``) "strand"
    from ``arr`` and merges it into ``solution``; recurses until ``arr`` is empty.
    Note: ``arr`` is consumed (mutated) in the process.
    """
    # gt extracts ascending strands, lt descending ones
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
671
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    # NOTE: the method must be named test_* or unittest never collects it.
    def test_accelerated_optimizer_pickling(self):
        """An optimizer wrapped by Accelerator.prepare must survive a pickle round-trip."""
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        # reset global singleton state so later tests start clean
        AcceleratorState._reset_state()
671
1
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
        "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
        "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
        "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
        "bert-base-multilingual-uncased": (
            "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
        ),
        "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
        "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
        "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
        "bert-large-uncased-whole-word-masking": (
            "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
        ),
        "bert-large-cased-whole-word-masking": (
            "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
        ),
        "bert-large-uncased-whole-word-masking-finetuned-squad": (
            "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
        ),
        "bert-large-cased-whole-word-masking-finetuned-squad": (
            "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
        ),
        "bert-base-cased-finetuned-mrpc": (
            "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
        ),
        "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
        "bert-base-german-dbmdz-uncased": (
            "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
        ),
        "TurkuNLP/bert-base-finnish-cased-v1": (
            "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
        ),
        "TurkuNLP/bert-base-finnish-uncased-v1": (
            "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
        ),
        "wietsedv/bert-base-dutch-cased": (
            "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
        "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
        "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
        "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
        "bert-base-multilingual-uncased": (
            "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
        ),
        "bert-base-multilingual-cased": (
            "https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
        "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
        "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
        "bert-large-uncased-whole-word-masking": (
            "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
        ),
        "bert-large-cased-whole-word-masking": (
            "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
        ),
        "bert-large-uncased-whole-word-masking-finetuned-squad": (
            "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
        ),
        "bert-large-cased-whole-word-masking-finetuned-squad": (
            "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
        ),
        "bert-base-cased-finetuned-mrpc": (
            "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
        ),
        "bert-base-german-dbmdz-cased": (
            "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
        ),
        "bert-base-german-dbmdz-uncased": (
            "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
        ),
        "TurkuNLP/bert-base-finnish-cased-v1": (
            "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
        ),
        "TurkuNLP/bert-base-finnish-uncased-v1": (
            "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
        ),
        "wietsedv/bert-base-dutch-cased": (
            "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-multilingual-uncased": 512,
    "bert-base-multilingual-cased": 512,
    "bert-base-chinese": 512,
    "bert-base-german-cased": 512,
    "bert-large-uncased-whole-word-masking": 512,
    "bert-large-cased-whole-word-masking": 512,
    "bert-large-uncased-whole-word-masking-finetuned-squad": 512,
    "bert-large-cased-whole-word-masking-finetuned-squad": 512,
    "bert-base-cased-finetuned-mrpc": 512,
    "bert-base-german-dbmdz-cased": 512,
    "bert-base-german-dbmdz-uncased": 512,
    "TurkuNLP/bert-base-finnish-cased-v1": 512,
    "TurkuNLP/bert-base-finnish-uncased-v1": 512,
    "wietsedv/bert-base-dutch-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "bert-base-uncased": {"do_lower_case": True},
    "bert-large-uncased": {"do_lower_case": True},
    "bert-base-cased": {"do_lower_case": False},
    "bert-large-cased": {"do_lower_case": False},
    "bert-base-multilingual-uncased": {"do_lower_case": True},
    "bert-base-multilingual-cased": {"do_lower_case": False},
    "bert-base-chinese": {"do_lower_case": False},
    "bert-base-german-cased": {"do_lower_case": False},
    "bert-large-uncased-whole-word-masking": {"do_lower_case": True},
    "bert-large-cased-whole-word-masking": {"do_lower_case": False},
    "bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
    "bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
    "bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
    "bert-base-german-dbmdz-cased": {"do_lower_case": False},
    "bert-base-german-dbmdz-uncased": {"do_lower_case": True},
    "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
    "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
    "wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}


class BertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" BERT tokenizer backed by HuggingFace *tokenizers* (WordPiece)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-create the backend normalizer if the serialized state disagrees with
        # the requested lowercase / strip_accents / chinese-chars behavior.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0s for the first sequence (+specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend vocabulary files to ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
671
from itertools import count def A_ ( _UpperCAmelCase = 50 ): SCREAMING_SNAKE_CASE_: Union[str, Any] = [1] * min_block_length for n in count(_UpperCAmelCase ): fill_count_functions.append(1 ) for block_length in range(_UpperCAmelCase , n + 1 ): for block_start in range(n - block_length ): fill_count_functions[n] += fill_count_functions[ n - block_start - block_length - 1 ] fill_count_functions[n] += 1 if fill_count_functions[n] > 1_00_00_00: break return n if __name__ == "__main__": print(f'''{solution() = }''')
671
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCAmelCase : Tuple = { """configuration_chinese_clip""": [ """CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ChineseCLIPConfig""", """ChineseCLIPOnnxConfig""", """ChineseCLIPTextConfig""", """ChineseCLIPVisionConfig""", ], """processing_chinese_clip""": ["""ChineseCLIPProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : int = ["""ChineseCLIPFeatureExtractor"""] lowerCAmelCase : str = ["""ChineseCLIPImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[Any] = [ """CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """ChineseCLIPModel""", """ChineseCLIPPreTrainedModel""", """ChineseCLIPTextModel""", """ChineseCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_chinese_clip import ( CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig, ) from .processing_chinese_clip import ChineseCLIPProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chinese_clip import ( CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) else: import sys lowerCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
671
def A_ ( _UpperCAmelCase ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise TypeError("only integers accepted as input" ) else: SCREAMING_SNAKE_CASE_: List[Any] = str(abs(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE_: Tuple = [list(_UpperCAmelCase ) for char in range(len(_UpperCAmelCase ) )] for index in range(len(_UpperCAmelCase ) ): num_transpositions[index].pop(_UpperCAmelCase ) return max( int("".join(list(_UpperCAmelCase ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__("""doctest""").testmod()
671
1
from collections.abc import Iterator
from typing import Any


class Node:
    """One element of a circular singly linked list."""

    def __init__(self, data: Any) -> None:
        self.data = data  # payload
        self.next: "Node | None" = None  # successor; never None once linked in


class CircularLinkedList:
    """Circular singly linked list tracking both ``head`` and ``tail``.

    Invariant: in a non-empty list ``tail.next is head``.

    BUG FIX: the previous version rebound every attribute mutation
    (``self.head``, ``self.tail``, ``node.next``, ``temp.next``) to throwaway
    local variables, so the structure was never actually modified, and the
    self-test referenced the undefined names ``Node`` / ``CircularLinkedList``
    (both classes had been renamed to the same identifier).
    """

    def __init__(self) -> None:
        self.head: "Node | None" = None
        self.tail: "Node | None" = None

    def __iter__(self) -> Iterator[Any]:
        """Yield each element's data once, starting at the head."""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:  # completed one full lap
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        """Append ``data`` after the current tail."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend ``data`` before the current head."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` at position ``index`` (0 <= index <= len(self)).

        :raises IndexError: if ``index`` is out of range.
        """
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points to itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        """Remove and return the head's data."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        """Remove and return the tail's data."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the data at position ``index``.

        :raises IndexError: if ``index`` is out of range.
        """
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def A_():
    """Self-test exercising every operation of CircularLinkedList."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
from collections.abc import Iterator
from typing import Any


class Node:
    """One element of a circular singly linked list."""

    def __init__(self, data: Any) -> None:
        self.data = data  # payload
        self.next: "Node | None" = None  # successor; never None once linked in


class CircularLinkedList:
    """Circular singly linked list tracking both ``head`` and ``tail``.

    Invariant: in a non-empty list ``tail.next is head``.

    BUG FIX: the previous version rebound every attribute mutation
    (``self.head``, ``self.tail``, ``node.next``, ``temp.next``) to throwaway
    local variables, so the structure was never actually modified, and the
    self-test referenced the undefined names ``Node`` / ``CircularLinkedList``
    (both classes had been renamed to the same identifier).
    """

    def __init__(self) -> None:
        self.head: "Node | None" = None
        self.tail: "Node | None" = None

    def __iter__(self) -> Iterator[Any]:
        """Yield each element's data once, starting at the head."""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:  # completed one full lap
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        """Append ``data`` after the current tail."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend ``data`` before the current head."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` at position ``index`` (0 <= index <= len(self)).

        :raises IndexError: if ``index`` is out of range.
        """
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points to itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        """Remove and return the head's data."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        """Remove and return the tail's data."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the data at position ``index``.

        :raises IndexError: if ``index`` is out of range.
        """
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def A_():
    """Self-test exercising every operation of CircularLinkedList."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
1
def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = len(_UpperCAmelCase ) print("The following activities are selected:" ) # The first activity is always selected SCREAMING_SNAKE_CASE_: Optional[Any] = 0 print(_UpperCAmelCase , end="," ) # Consider rest of the activities for j in range(_UpperCAmelCase ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(_UpperCAmelCase , end="," ) SCREAMING_SNAKE_CASE_: Optional[int] = j if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase : Tuple = [1, 3, 0, 5, 8, 5] lowerCAmelCase : str = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
671
from collections import defaultdict from math import ceil, sqrt def A_ ( _UpperCAmelCase = 1_00_00_00 , _UpperCAmelCase = 10 ): SCREAMING_SNAKE_CASE_: defaultdict = defaultdict(_UpperCAmelCase ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: SCREAMING_SNAKE_CASE_: Tuple = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: SCREAMING_SNAKE_CASE_: Optional[Any] = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(_UpperCAmelCase , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(f'''{solution() = }''')
671
1