language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
getsentry__sentry
src/sentry/testutils/factories.py
{ "start": 8323, "end": 14689 }
class ____(Enum): ERROR = "error" DEFAULT = "default" def get_fixture_path(*parts: str) -> str: path = os.path.realpath(__file__) for _ in range(4): # src/sentry/testutils/{__file__} path = os.path.dirname(path) return os.path.join(path, "fixtures", *parts) def make_sentence(words=None): if words is None: words = int(random.weibullvariate(8, 3)) return " ".join(random.choice(loremipsum.words) for _ in range(words)) def make_word(words=None): if words is None: words = int(random.weibullvariate(8, 3)) return random.choice(loremipsum.words) DEFAULT_EVENT_DATA = { "extra": { "loadavg": [0.97607421875, 0.88330078125, 0.833984375], "sys.argv": [ "/Users/dcramer/.virtualenvs/sentry/bin/raven", "test", "https://ebc35f33e151401f9deac549978bda11:f3403f81e12e4c24942d505f086b2cad@sentry.io/1", ], "user": "dcramer", }, "modules": {"raven": "3.1.13"}, "request": { "cookies": {}, "data": {}, "env": {}, "headers": {}, "method": "GET", "query_string": "", "url": "http://example.com", }, "stacktrace": { "frames": [ { "abs_path": "www/src/sentry/models/foo.py", "context_line": " string_max_length=self.string_max_length)", "filename": "sentry/models/foo.py", "function": "build_msg", "in_app": True, "lineno": 29, "module": "raven.base", "post_context": [ " },", " })", "", " if 'stacktrace' in data:", " if self.include_paths:", ], "pre_context": [ "", " data.update({", " 'stacktrace': {", " 'frames': get_stack_info(frames,", " list_max_length=self.list_max_length,", ], "vars": { "culprit": "raven.scripts.runner", "date": "datetime.datetime(2013, 2, 14, 20, 6, 33, 479471)", "event_id": "598fb19363e745ec8be665e6ba88b1b2", "event_type": "raven.events.Message", "frames": "<generator object iter_stack_frames at 0x103fef050>", "handler": "<raven.events.Message object at 0x103feb710>", "k": "logentry", "public_key": None, "result": { "logentry": "{'message': 'This is a test message generated using ``raven test``', 'params': []}" }, "self": "<raven.base.Client object at 0x104397f10>", "stack": 
True, "tags": None, "time_spent": None, }, }, { "abs_path": "/Users/dcramer/.virtualenvs/sentry/lib/python2.7/site-packages/raven/base.py", "context_line": " string_max_length=self.string_max_length)", "filename": "raven/base.py", "function": "build_msg", "in_app": False, "lineno": 290, "module": "raven.base", "post_context": [ " },", " })", "", " if 'stacktrace' in data:", " if self.include_paths:", ], "pre_context": [ "", " data.update({", " 'stacktrace': {", " 'frames': get_stack_info(frames,", " list_max_length=self.list_max_length,", ], "vars": { "culprit": "raven.scripts.runner", "date": "datetime.datetime(2013, 2, 14, 20, 6, 33, 479471)", "event_id": "598fb19363e745ec8be665e6ba88b1b2", "event_type": "raven.events.Message", "frames": "<generator object iter_stack_frames at 0x103fef050>", "handler": "<raven.events.Message object at 0x103feb710>", "k": "logentry", "public_key": None, "result": { "logentry": "{'message': 'This is a test message generated using ``raven test``', 'params': []}" }, "self": "<raven.base.Client object at 0x104397f10>", "stack": True, "tags": None, "time_spent": None, }, }, ] }, "tags": [], "platform": "python", } default_detector_config_data = { MetricIssue.slug: {"threshold_period": 1, "detection_type": "static"} } def _patch_artifact_manifest(path, org=None, release=None, project=None, extra_files=None): with open(path, "rb") as fp: manifest = orjson.loads(fp.read()) if org: manifest["org"] = org if release: manifest["release"] = release if project: manifest["project"] = project for path in extra_files or {}: manifest["files"][path] = {"url": path} return orjson.dumps(manifest).decode() def _set_sample_rate_from_error_sampling(normalized_data: MutableMapping[str, Any]) -> None: """Set 'sample_rate' on normalized_data if contexts.error_sampling.client_sample_rate is present and valid.""" client_sample_rate = None try: client_sample_rate = ( normalized_data.get("contexts", {}).get("error_sampling", {}).get("client_sample_rate") ) 
except Exception: pass if client_sample_rate: try: normalized_data["sample_rate"] = float(client_sample_rate) except Exception: pass # TODO(dcramer): consider moving to something more scalable like factoryboy
EventType
python
kamyu104__LeetCode-Solutions
Python/number-of-ways-to-paint-n-3-grid.py
{ "start": 876, "end": 1206 }
class ____(object): def numOfWays(self, n): """ :type n: int :rtype: int """ MOD = 10**9 + 7 aba, abc = 6, 6 for _ in xrange(n-1): aba, abc = (3*aba%MOD + 2*abc%MOD)%MOD, \ (2*abc%MOD + 2*aba%MOD)%MOD return (aba+abc)%MOD
Solution2
python
jpadilla__pyjwt
jwt/api_jwk.py
{ "start": 5853, "end": 6135 }
class ____: def __init__(self, jwk_set: PyJWKSet): self.jwk_set = jwk_set self.timestamp = time.monotonic() def get_jwk_set(self) -> PyJWKSet: return self.jwk_set def get_timestamp(self) -> float: return self.timestamp
PyJWTSetWithTimestamp
python
walkccc__LeetCode
solutions/302. Smallest Rectangle Enclosing Black Pixels/302.py
{ "start": 0, "end": 857 }
class ____: def minArea(self, image: list[list[str]], x: int, y: int) -> int: DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0)) m = len(image) n = len(image[0]) topLeft = [x, y] bottomRight = [x, y] q = collections.deque([(x, y)]) image[x][y] = '2' # Mark as visited. while q: i, j = q.popleft() for dx, dy in DIRS: r = i + dx c = j + dy if r < 0 or r == m or c < 0 or c == n: continue if image[r][c] != '1': continue topLeft[0] = min(topLeft[0], r) topLeft[1] = min(topLeft[1], c) bottomRight[0] = max(bottomRight[0], r) bottomRight[1] = max(bottomRight[1], c) q.append((r, c)) image[r][c] = '2' width = bottomRight[1] - topLeft[1] + 1 height = bottomRight[0] - topLeft[0] + 1 return width * height
Solution
python
pytorch__pytorch
tools/linter/adapters/pylint_linter.py
{ "start": 209, "end": 312 }
class ____(str, Enum): ERROR = "error" WARNING = "warning" DISABLED = "disabled"
LintSeverity
python
scrapy__scrapy
tests/test_engine.py
{ "start": 2544, "end": 2697 }
class ____(MySpider): async def start(self): for url in self.start_urls: yield Request(url) # no dont_filter=True
DupeFilterSpider
python
realpython__materials
python-assignment-statements/point_descriptor.py
{ "start": 0, "end": 386 }
class ____: def __set_name__(self, owner, name): self._name = name def __get__(self, instance, owner): return instance.__dict__[self._name] def __set__(self, instance, value): try: instance.__dict__[self._name] = float(value) except ValueError: raise ValueError(f'"{self._name}" must be a number') from None
Coordinate
python
huggingface__transformers
src/transformers/models/udop/processing_udop.py
{ "start": 994, "end": 1171 }
class ____(TextKwargs, total=False): word_labels: Optional[Union[list[int], list[list[int]]]] boxes: Optional[Union[list[list[int]], list[list[list[int]]]]]
UdopTextKwargs
python
django-haystack__django-haystack
haystack/exceptions.py
{ "start": 100, "end": 203 }
class ____(HaystackError): """Raised when a backend can not be found.""" pass
SearchBackendError
python
pytorch__pytorch
torch/_inductor/loop_body.py
{ "start": 2543, "end": 17661 }
class ____: """ Captures the body of a Loops subclass into an FX graph. Persists any indexing simplifications and makes it easier to analyze loop bodies. """ indexing_exprs: dict[str, sympy.Expr] submodules: dict[str, Any] subblocks: dict[str, LoopBodyBlock] indirect_vars: list[sympy.Symbol] indirect_var_ranges: dict[sympy.Symbol, sympy.Expr] root_block: LoopBodyBlock memory_usage: dict[MemoryUsageType, list[MemoryEntry]] op_counts: collections.Counter[str] # defined only temporarily indexing_exprs_name: dict[sympy.Expr, str] def __init__( self, fn, args, var_ranges, iter_vars, reduce_vars, allow_same_symbol_in_index=False, ): super().__init__() _flat_sizes = tuple(var_ranges.values()) self.sizes = ( _flat_sizes[: len(iter_vars)], _flat_sizes[len(iter_vars) :], ) self.iter_vars = iter_vars self.reduce_vars = reduce_vars self.var_ranges = var_ranges if isinstance(fn, LoopBody): self._init_with_copy(fn, args, allow_same_symbol_in_index) else: self._init_with_tracing(fn, args) self.indexing = None def get_original_num_rdims(self) -> int: assert self.has_partial_accumulate node = self.root_block.graph.find_nodes( op="call_method", target="partial_accumulate" )[0] meta = node.args[-1] return meta["num_reduction_dims"] def extract_pw_from_reduction(self): self.root_block = self.root_block.extract_pw_from_reduction() self.has_partial_accumulate = True self.iter_vars = self.iter_vars + self.reduce_vars self.reduce_vars = [] self.sizes = (self.sizes[0] + self.sizes[1], tuple()) return self def _init_with_tracing(self, fn, args): """Do an FX trace of an arbitrary callable to construct self""" self.indexing_exprs = {} self.indexing_exprs_name = {} self.submodules = {"get_index": self.get_index} self.subblocks = {} self.indirect_vars = [] self.indirect_var_ranges: dict[sympy.Symbol, sympy.Expr] = {} self.memory_usage = {t: [] for t in MemoryUsageType} self.op_counts = collections.Counter() self.root_block = LoopBodyBlock(self, fn, args) # traces self.has_partial_accumulate = 
self.root_block.graph.find_nodes( op="call_method", target="partial_accumulate" ) del self.indexing_exprs_name # not used after _init_with_tracing def _init_with_copy(self, other: LoopBody, args, allow_same_symbol_in_index): """ _init_with_tracing() is slow, so this is a fast path in the case where we are just reordering/merging/splitting the args of an existing LoopBody. """ indexing_exprs = other.indexing_from_args(args, allow_same_symbol_in_index) self.indexing_exprs = { name: V.graph.sizevars.simplify_with_ranges(expr, self.var_ranges) for name, expr in indexing_exprs.items() } self.subblocks = {k: v.clone(self) for k, v in other.subblocks.items()} self.indirect_vars = other.indirect_vars self.indirect_var_ranges = other.indirect_var_ranges self.memory_usage = other.memory_usage self.op_counts = other.op_counts self.root_block = other.root_block.clone(self) self.has_partial_accumulate = other.has_partial_accumulate submodules = {**other.submodules} submodules.pop("get_index") self.submodules = { "get_index": self.get_index, **{k: v.clone(self) for k, v in submodules.items()}, # type: ignore[attr-defined] } def has_op(self, name: str): return self.op_counts.get(name, 0) > 0 def merge_loops(self) -> LoopBody: """ Merge both iteration and reduction loops and return a new LoopBody. 
""" old_body = self old_sizes = self.sizes old_iter_vars, old_reduce_vars = old_body.vars old_iter_sizes, old_reduce_sizes = old_sizes index_exprs = [*old_body.indexing_exprs.values()] iter_sizes, iter_reindex, _ = V.graph.sizevars._simplify_loops( old_iter_vars, old_iter_sizes, index_prevent_reordering(index_exprs, old_iter_vars, old_iter_sizes), ) reduce_sizes, reduce_reindex, _ = V.graph.sizevars._simplify_loops( old_reduce_vars, old_reduce_sizes, index_prevent_reordering(index_exprs, old_reduce_vars, old_reduce_sizes), ) if iter_sizes == old_iter_sizes and reduce_sizes == old_reduce_sizes: return old_body ( ( iter_vars, reduce_vars, ), var_ranges, ) = dependencies.index_vars_no_squeeze(iter_sizes, reduce_sizes, prefix="p") new_body = LoopBody( old_body, [iter_reindex(iter_vars), reduce_reindex(reduce_vars)], var_ranges, iter_vars, reduce_vars, allow_same_symbol_in_index=True, ) return new_body def expand_dimension_for_pointwise_node( self, dimension: int, new_range: int ) -> LoopBody: """ Expand node on `dimension` to `new_range` and rely on index modular to avoid out-of-boundary access. 
""" old_body = self old_sizes = self.sizes iter_size, reduce_size = old_sizes original_range = iter_size[dimension] new_iter_size = list(iter_size) new_iter_size[dimension] = new_range new_sizes = (new_iter_size, reduce_size) (iter_vars, reduce_vars), var_ranges = dependencies.index_vars_no_squeeze( *new_sizes, prefix="t", # type: ignore[arg-type] ) def new_body(*indices: Sequence[sympy.Expr]) -> Any: index = [*itertools.chain.from_iterable(indices)] assert len(index) == len(iter_size) + len(reduce_size) iter_idx = index[: len(iter_size)] reduce_idx = index[len(iter_size) :] new_iter_idx = list(iter_idx) new_iter_idx[dimension] = iter_idx[dimension] % original_range return old_body(new_iter_idx, reduce_idx) loop_body = LoopBody( new_body, (iter_vars, reduce_vars), var_ranges, iter_vars, reduce_vars ) # use the original symbol prefix so we can do multiple round of reordering (iter_vars2, reduce_vars2), var_ranges2 = dependencies.index_vars_no_squeeze( *new_sizes, prefix="p", # type: ignore[arg-type] ) new_body = LoopBody( loop_body, (iter_vars2, reduce_vars2), var_ranges2, iter_vars2, reduce_vars2 ) return new_body def reorder_iter_loops(self, new_order) -> LoopBody: """ Reorder iteration loops and return a new LoopBody. 
""" from .ir import same_reorder old_body = self old_sizes = self.sizes assert len(old_sizes[0]) == len(new_order) reorder_fn = same_reorder(new_order) iter_size, reduce_size = old_sizes new_iter_size = reorder_fn(iter_size) new_sizes = (new_iter_size, reduce_size) (iter_vars, reduce_vars), var_ranges = dependencies.index_vars_no_squeeze( *new_sizes, prefix="p", # type: ignore[arg-type] ) inverse_order = {b: a for a, b in enumerate(new_order)} inverse_order = [inverse_order[i] for i in range(len(new_order))] def new_body(*indices: Sequence[sympy.Expr]) -> Any: index = [*itertools.chain.from_iterable(indices)] assert len(index) == len(iter_size) + len(reduce_size) iter_idx = index[: len(iter_size)] reduce_idx = index[len(iter_size) :] iter_idx = [iter_idx[i] for i in inverse_order] return old_body(iter_idx, reduce_idx, allow_same_symbol_in_index=True) return LoopBody( new_body, (iter_vars, reduce_vars), var_ranges, iter_vars, reduce_vars, ) @property def vars(self): assert self.iter_vars is not None assert self.reduce_vars is not None return self.iter_vars, self.reduce_vars @cache_on_self def get_nodes(self): all_graphs = itertools.chain( (self.root_block.graph,), (block.graph for block in self.subblocks.values()), ) return [node for graph in all_graphs for node in graph.nodes] @cache_on_self def bounds(self): # Doing a local import to avoid dumping all the code here from .bounds import BoundVars return BoundVars(self) def get_read_expr(self, buffer_name): # reversed to match old behavior for entry in reversed(self.memory_usage[MemoryUsageType.LOAD]): if entry.buffer_name == buffer_name: return self.indexing_exprs[entry.index_name] raise KeyError(buffer_name) def get_write_expr(self, buffer_name): for entry in itertools.chain( self.memory_usage[MemoryUsageType.STORE], self.memory_usage[MemoryUsageType.STORE_REDUCTION], ): if entry.buffer_name == buffer_name: return self.indexing_exprs[entry.index_name] raise KeyError(buffer_name) def get_read_exprs(self): return [ 
self.indexing_exprs[entry.index_name] for entry in self.memory_usage[MemoryUsageType.LOAD] ] def get_all_read_expr(self, buffer_name): # reversed to match old behavior out = [] for entry in reversed(self.memory_usage[MemoryUsageType.LOAD]): if entry.buffer_name == buffer_name: out.append(self.indexing_exprs[entry.index_name]) return out def get_write_exprs(self): return [ self.indexing_exprs[entry.index_name] for entry in itertools.chain( self.memory_usage[MemoryUsageType.STORE], self.memory_usage[MemoryUsageType.STORE_REDUCTION], ) ] def get_all_write_expr(self, buffer_name): out = [] for entry in itertools.chain( self.memory_usage[MemoryUsageType.STORE], self.memory_usage[MemoryUsageType.STORE_REDUCTION], ): if entry.buffer_name == buffer_name: out.append(self.indexing_exprs[entry.index_name]) return out def debug_str(self): lines = [f"var_ranges = {dict(self.var_ranges)}"] lines.extend([f"{name} = {val}" for name, val in self.indexing_exprs.items()]) lines.extend( [ block.debug_str(name) for name, block in itertools.chain( [("body", self.root_block)], self.subblocks.items() ) ] ) return "\n".join(lines) def is_memory_copy(self) -> bool: """ True of this contains only a single loads and store. Note, this could involve a layout change. 
""" return ( len(self.memory_usage[MemoryUsageType.LOAD]) == 1 and len(self.memory_usage[MemoryUsageType.STORE]) == 1 and len(self.submodules) == 1 # get_index and self.root_block.contains_only_ops(("load", "store")) ) __repr__ = debug_str def add_index_expr( self, expr: sympy.Expr, mtype: MemoryUsageType, buffer_name: Optional[str] = None, mode: Optional[str] = None, ): name = self.indexing_exprs_name.get(expr) if not name: name = f"index{len(self.indexing_exprs)}" self.indexing_exprs_name[expr] = name self.indexing_exprs[name] = expr self.memory_usage[mtype].append(MemoryEntry(name, buffer_name, mode)) return name def add_submodule(self, block, prefix): """Not actually for nn.Modules, but subblocks in generated code are mapped to FX call_module opcodes""" if prefix[-1].isnumeric() and prefix not in self.submodules: name = prefix else: name = f"{prefix}{len(self.submodules)}" self.submodules[name] = block return name def add_indirect(self, size): var = sympy_index_symbol_with_prefix(SymT.INDIRECT, len(self.indirect_vars)) assert var not in self.indirect_var_ranges self.indirect_vars.append(var) self.indirect_var_ranges[var] = size return var def replace_indirect(self, old, new): """Swap in a variable used in indirect indexing""" if str(old) == str(new): return assert self.indexing is not None # pyrefly: ignore [bad-assignment] self.indexing = {k: sympy_subs(v, {old: new}) for k, v in self.indexing.items()} def get_index(self, name): assert self.indexing is not None return self.indexing[name] def indexing_from_args(self, indices, allow_same_symbol_in_index=False): index = [*itertools.chain.from_iterable(indices)] assert len(index) == len(self.var_ranges), (index, self.var_ranges) assert allow_same_symbol_in_index or all( v not in self.var_ranges for v in index ), f"{self.var_ranges=}, {indices=}" replacements = dict(zip(self.var_ranges.keys(), index)) return { name: sympy_subs(expr, replacements) for name, expr in self.indexing_exprs.items() } def __call__(self, 
*indices, allow_same_symbol_in_index=False): self.indexing = self.indexing_from_args(indices, allow_same_symbol_in_index) result = self.root_block() self.indexing = None return result def bind_set_indirect_shim(self, var, size, check, wrap_neg): def set_indirect(new_var): self.replace_indirect( var, V.ops.indirect_indexing(new_var, size, check, wrap_neg) ) set_indirect.clone = functools.partial( # type: ignore[attr-defined] LoopBody.bind_set_indirect_shim, var=var, size=size, check=check, wrap_neg=wrap_neg, ) return set_indirect def bind_scan_shim(self, combine_fn): def shim(dtypes, values): return V.ops.scan(dtypes, combine_fn, values) shim.clone = functools.partial(LoopBody.bind_scan_shim, combine_fn=combine_fn) # type: ignore[attr-defined] return shim def bind_masked_shim(self, name): def shim(mask, other): return V.ops.masked(mask, self.subblocks[name], other) shim.clone = functools.partial(LoopBody.bind_masked_shim, name=name) # type: ignore[attr-defined] return shim
LoopBody
python
bokeh__bokeh
src/bokeh/protocol/messages/server_info_reply.py
{ "start": 1669, "end": 3097 }
class ____(Message[ServerInfo]): ''' Define the ``SERVER-INFO-REPLY`` message for replying to Server info requests from clients. The ``content`` fragment of for this message is has the form: .. code-block:: python { 'version_info' : { 'bokeh' : <bokeh library version> 'server' : <bokeh server version> } } ''' msgtype = 'SERVER-INFO-REPLY' @classmethod def create(cls, request_id: ID, **metadata: Any) -> server_info_reply: ''' Create an ``SERVER-INFO-REPLY`` message Args: request_id (str) : The message ID for the message that issues the info request Any additional keyword arguments will be put into the message ``metadata`` fragment as-is. ''' header = cls.create_header(request_id=request_id) content = ServerInfo(version_info=_VERSION_INFO) return cls(header, metadata, content) #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- _VERSION_INFO = VersionInfo( bokeh = __version__, server = __version__, ) #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
server_info_reply
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/datacatalog.py
{ "start": 2264, "end": 7421 }
class ____(GoogleCloudBaseOperator): """ Creates an entry. Currently only entries of 'FILESET' type can be created. The newly created entry ID are saved under the ``entry_id`` key in XCOM. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:CloudDataCatalogCreateEntryOperator` :param location: Required. The location of the entry to create. :param entry_group: Required. Entry group ID under which the entry is created. :param entry_id: Required. The id of the entry to create. :param entry: Required. The entry to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.datacatalog_v1beta1.types.Entry` :param project_id: The ID of the Google Cloud project that owns the entry. If set to ``None`` or missing, the default project_id from the Google Cloud connection is used. :param retry: A retry object used to retry requests. If set to ``None`` or missing, requests will be retried using a default configuration. :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. :param metadata: Additional metadata that is provided to the method. :param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud. Defaults to 'google_cloud_default'. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). 
""" template_fields: Sequence[str] = ( "location", "entry_group", "entry_id", "entry", "project_id", "retry", "timeout", "metadata", "gcp_conn_id", "impersonation_chain", ) operator_extra_links = (DataCatalogEntryLink(),) def __init__( self, *, location: str, entry_group: str, entry_id: str, entry: dict | Entry, project_id: str = PROVIDE_PROJECT_ID, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.location = location self.entry_group = entry_group self.entry_id = entry_id self.entry = entry self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context: Context): hook = CloudDataCatalogHook( gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain ) try: result = hook.create_entry( location=self.location, entry_group=self.entry_group, entry_id=self.entry_id, entry=self.entry, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) except AlreadyExists: self.log.info("Entry already exists. 
Skipping create operation.") result = hook.get_entry( location=self.location, entry_group=self.entry_group, entry=self.entry_id, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) _, _, entry_id = result.name.rpartition("/") self.log.info("Current entry_id ID: %s", entry_id) context["ti"].xcom_push(key="entry_id", value=entry_id) DataCatalogEntryLink.persist( context=context, entry_id=self.entry_id, entry_group_id=self.entry_group, location_id=self.location, project_id=self.project_id or hook.project_id, ) return Entry.to_dict(result) @deprecated( planned_removal_date="January 30, 2026", use_instead="airflow.providers.google.cloud.operators.dataplex.DataplexCatalogCreateEntryGroupOperator", reason="The Data Catalog will be discontinued on January 30, 2026 " "in favor of Dataplex Universal Catalog.", category=AirflowProviderDeprecationWarning, )
CloudDataCatalogCreateEntryOperator
python
python-poetry__poetry
src/poetry/console/commands/self/show/plugins.py
{ "start": 1063, "end": 3990 }
class ____(SelfCommand): name = "self show plugins" description = "Shows information about the currently installed plugins." help = """\ The <c1>self show plugins</c1> command lists all installed Poetry plugins. Plugins can be added and removed using the <c1>self add</c1> and <c1>self remove</c1> \ commands respectively. <warning>This command does not list packages that do not provide a Poetry plugin.</> """ def _system_project_handle(self) -> int: from packaging.utils import canonicalize_name from poetry.plugins.application_plugin import ApplicationPlugin from poetry.plugins.plugin import Plugin from poetry.plugins.plugin_manager import PluginManager from poetry.repositories.installed_repository import InstalledRepository from poetry.utils.env import EnvManager from poetry.utils.helpers import pluralize plugins: dict[str, PluginPackage] = {} system_env = EnvManager.get_system_env(naive=True) installed_repository = InstalledRepository.load( system_env, with_dependencies=True ) packages_by_name: dict[str, Package] = { pkg.name: pkg for pkg in installed_repository.packages } for group in [ApplicationPlugin.group, Plugin.group]: for entry_point in PluginManager(group).get_plugin_entry_points(): assert entry_point.dist is not None package = packages_by_name[canonicalize_name(entry_point.dist.name)] name = package.pretty_name info = plugins.get(name) or PluginPackage(package=package) info.append(entry_point) plugins[name] = info for name, info in plugins.items(): package = info.package description = " " + package.description if package.description else "" self.line("") self.line(f" - <c1>{name}</c1> (<c2>{package.version}</c2>){description}") provide_line = " " if info.plugins: count = len(info.plugins) provide_line += f" <info>{count}</info> plugin{pluralize(count)}" if info.application_plugins: if info.plugins: provide_line += " and" count = len(info.application_plugins) provide_line += ( f" <info>{count}</info> application plugin{pluralize(count)}" ) 
self.line(provide_line) if package.requires: self.line("") self.line(" <info>Dependencies</info>") for dependency in package.requires: self.line( f" - {dependency.pretty_name}" f" (<c2>{dependency.pretty_constraint}</c2>)" ) return 0
SelfShowPluginsCommand
python
ray-project__ray
python/ray/data/aggregate.py
{ "start": 13328, "end": 14927 }
class ____(AggregateFnV2[Union[int, float], Union[int, float]]): """Defines sum aggregation. Example: .. testcode:: import ray from ray.data.aggregate import Sum ds = ray.data.range(100) # Schema: {'id': int64} ds = ds.add_column("group_key", lambda x: x % 3) # Schema: {'id': int64, 'group_key': int64} # Summing all rows per group: result = ds.aggregate(Sum(on="id")) # result: {'sum(id)': 4950} Args: on: The name of the numerical column to sum. Must be provided. ignore_nulls: Whether to ignore null values during summation. If `True` (default), nulls are skipped. If `False`, the sum will be null if any value in the group is null. alias_name: Optional name for the resulting column. """ def __init__( self, on: Optional[str] = None, ignore_nulls: bool = True, alias_name: Optional[str] = None, ): super().__init__( alias_name if alias_name else f"sum({str(on)})", on=on, ignore_nulls=ignore_nulls, zero_factory=lambda: 0, ) def aggregate_block(self, block: Block) -> Union[int, float]: return BlockAccessor.for_block(block).sum( self._target_col_name, self._ignore_nulls ) def combine( self, current_accumulator: Union[int, float], new: Union[int, float] ) -> Union[int, float]: return current_accumulator + new @PublicAPI
Sum
python
scipy__scipy
scipy/fftpack/tests/test_real_transforms.py
{ "start": 17415, "end": 17546 }
class ____(_TestIDSTBase): def setup_method(self): self.rdt = int self.dec = 4 self.type = 1
TestIDSTIInt
python
scipy__scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_A.py
{ "start": 4534, "end": 5636 }
class ____(Benchmark): r""" Alpine01 objective function. The Alpine01 [1]_ global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\text{Alpine01}}(x) = \sum_{i=1}^{n} \lvert {x_i \sin \left( x_i \right) + 0.1 x_i} \rvert Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`. *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for :math:`i = 1, ..., n` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. """ change_dimensionality = True def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N)) self.global_optimum = [[0 for _ in range(self.N)]] self.fglob = 0.0 def fun(self, x, *args): self.nfev += 1 return sum(abs(x * sin(x) + 0.1 * x))
Alpine01
python
celery__celery
celery/events/state.py
{ "start": 4524, "end": 7844 }
class ____: """Worker State.""" heartbeat_max = 4 expire_window = HEARTBEAT_EXPIRE_WINDOW _fields = ('hostname', 'pid', 'freq', 'heartbeats', 'clock', 'active', 'processed', 'loadavg', 'sw_ident', 'sw_ver', 'sw_sys') if not PYPY: # pragma: no cover __slots__ = _fields + ('event', '__dict__', '__weakref__') def __init__(self, hostname=None, pid=None, freq=60, heartbeats=None, clock=0, active=None, processed=None, loadavg=None, sw_ident=None, sw_ver=None, sw_sys=None): self.hostname = hostname self.pid = pid self.freq = freq self.heartbeats = [] if heartbeats is None else heartbeats self.clock = clock or 0 self.active = active self.processed = processed self.loadavg = loadavg self.sw_ident = sw_ident self.sw_ver = sw_ver self.sw_sys = sw_sys self.event = self._create_event_handler() def __reduce__(self): return self.__class__, (self.hostname, self.pid, self.freq, self.heartbeats, self.clock, self.active, self.processed, self.loadavg, self.sw_ident, self.sw_ver, self.sw_sys) def _create_event_handler(self): _set = object.__setattr__ hbmax = self.heartbeat_max heartbeats = self.heartbeats hb_pop = self.heartbeats.pop hb_append = self.heartbeats.append def event(type_, timestamp=None, local_received=None, fields=None, max_drift=HEARTBEAT_DRIFT_MAX, abs=abs, int=int, insort=bisect.insort, len=len): fields = fields or {} for k, v in fields.items(): _set(self, k, v) if type_ == 'offline': heartbeats[:] = [] else: if not local_received or not timestamp: return drift = abs(int(local_received) - int(timestamp)) if drift > max_drift: _warn_drift(self.hostname, drift, local_received, timestamp) if local_received: # pragma: no cover hearts = len(heartbeats) if hearts > hbmax - 1: hb_pop(0) if hearts and local_received > heartbeats[-1]: hb_append(local_received) else: insort(heartbeats, local_received) return event def update(self, f, **kw): d = dict(f, **kw) if kw else f for k, v in d.items(): setattr(self, k, v) def __repr__(self): return R_WORKER.format(self) @property def 
status_string(self): return 'ONLINE' if self.alive else 'OFFLINE' @property def heartbeat_expires(self): return heartbeat_expires(self.heartbeats[-1], self.freq, self.expire_window) @property def alive(self, nowfun=time): return bool(self.heartbeats and nowfun() < self.heartbeat_expires) @property def id(self): return '{0.hostname}.{0.pid}'.format(self) @with_unique_field('uuid')
Worker
python
HypothesisWorks__hypothesis
hypothesis-python/tests/nocover/test_explore_arbitrary_languages.py
{ "start": 907, "end": 963 }
class ____: value: Any child: Any @dataclass
Write
python
fluentpython__example-code-2e
21-async/mojifinder/bottle.py
{ "start": 110828, "end": 111857 }
class ____(ServerAdapter): def run(self, app): # pragma: no cover from wsgiref.simple_server import WSGIRequestHandler, WSGIServer from wsgiref.simple_server import make_server import socket class FixedHandler(WSGIRequestHandler): def address_string(self): # Prevent reverse DNS lookups please. return self.client_address[0] def log_request(*args, **kw): if not self.quiet: return WSGIRequestHandler.log_request(*args, **kw) handler_cls = self.options.get('handler_class', FixedHandler) server_cls = self.options.get('server_class', WSGIServer) if ':' in self.host: # Fix wsgiref for IPv6 addresses. if getattr(server_cls, 'address_family') == socket.AF_INET: class server_cls(server_cls): address_family = socket.AF_INET6 srv = make_server(self.host, self.port, app, server_cls, handler_cls) srv.serve_forever()
WSGIRefServer
python
huggingface__transformers
tests/models/llava_onevision/test_image_processing_llava_onevision.py
{ "start": 3205, "end": 13816 }
class ____(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = LlavaOnevisionImageProcessor if is_vision_available() else None fast_image_processing_class = LlavaOnevisionImageProcessorFast if is_torchvision_available() else None # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.setUp with CLIP->LlavaOnevision def setUp(self): super().setUp() self.image_processor_tester = LlavaOnevisionImageProcessingTester(self) @property # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.image_processor_dict def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) self.assertTrue(hasattr(image_processing, "image_grid_pinpoints")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 20, "width": 20}) image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"shortest_edge": 42}) def test_call_pil(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) for image in image_inputs: 
self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 1522, 3, 20, 20) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1522, 3, 20, 20) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_numpy(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 1522, 3, 20, 20) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1522, 3, 20, 20) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_pytorch(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 1522, 3, 20, 20) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched 
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1522, 3, 20, 20) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) @unittest.skip( reason="LlavaOnevisionImageProcessor doesn't treat 4 channel PIL and numpy consistently yet" ) # FIXME raushan def test_call_numpy_4_channels(self): pass def test_nested_input(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) # Test batched as a list of images encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1522, 3, 20, 20) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched as a nested list of images, where each sublist is one batch image_inputs_nested = [[image_input] for image_input in image_inputs] encoded_images_nested = image_processing(image_inputs_nested, return_tensors="pt").pixel_values expected_output_image_shape = (7, 1522, 3, 20, 20) self.assertEqual(tuple(encoded_images_nested.shape), expected_output_image_shape) # Image processor should return same pixel values, independently of input format self.assertTrue((encoded_images_nested == encoded_images).all()) def test_multi_images(self): length = 384 scale_single, scale_multi = 2, 3 image_processor_dict = self.image_processor_tester.prepare_image_processor_dict() image_processor_dict["size"] = {"height": length, "width": length} # patch size for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**image_processor_dict) # Test batched as a nested list of images, where each sublist is one batch len_image_1 = length * scale_single image_inputs_1 = prepare_image_inputs( batch_size=1, min_resolution=0, # not used max_resolution=len_image_1, num_channels=3, 
equal_resolution=True, ) len_image_2 = length * scale_multi image_inputs_2 = prepare_image_inputs( batch_size=7, min_resolution=0, # not used max_resolution=len_image_2, num_channels=3, equal_resolution=True, ) image_inputs = [image_inputs_1, image_inputs_2] # Only single image should be patchified expected_num_patches = scale_single**2 + 1 # +1 for base image patch expected_output_image_shape = (8, expected_num_patches, 3, length, length) encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) @unittest.skip( reason="LlavaOnevisionImageProcessorFast doesn't compile (infinitely) when using class transforms" ) # FIXME yoni @pytest.mark.torch_compile_test def test_can_compile_fast_image_processor(self): pass def test_pad_for_patching(self): for image_processing_class in self.image_processor_list: if image_processing_class == self.fast_image_processing_class: numpify = False torchify = True input_data_format = image_processing_class.data_format else: numpify = True torchify = False input_data_format = ChannelDimension.LAST image_processing = image_processing_class(**self.image_processor_dict) # Create odd-sized images image_input = self.image_processor_tester.prepare_image_inputs( equal_resolution=True, numpify=numpify, torchify=torchify, )[0] self.assertIn(image_input.shape, [(3, 400, 400), (400, 400, 3)]) # Test odd-width image_shape = (400, 601) encoded_images = image_processing._pad_for_patching(image_input, image_shape, input_data_format) encoded_image_shape = ( encoded_images.shape[:-1] if input_data_format == ChannelDimension.LAST else encoded_images.shape[1:] ) self.assertEqual(encoded_image_shape, image_shape) # Test odd-height image_shape = (503, 400) encoded_images = image_processing._pad_for_patching(image_input, image_shape, input_data_format) encoded_image_shape = ( encoded_images.shape[:-1] if input_data_format == ChannelDimension.LAST else 
encoded_images.shape[1:] ) self.assertEqual(encoded_image_shape, image_shape) def test_call_without_padding(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) # Test not batched input encoded_images = image_processing(image_inputs[0], do_pad=False).pixel_values self.assertEqual(len(encoded_images), 1) # Test batched encoded_images = image_processing(image_inputs, do_pad=False).pixel_values self.assertEqual(len(encoded_images), len(image_inputs))
LlavaOnevisionImageProcessingTest
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/operators/sagemaker_unified_studio.py
{ "start": 1515, "end": 6888 }
class ____(BaseOperator): """ Provides Artifact execution functionality for Sagemaker Unified Studio Workflows. Examples: .. code-block:: python from airflow.providers.amazon.aws.operators.sagemaker_unified_studio import SageMakerNotebookOperator notebook_operator = SageMakerNotebookOperator( task_id="notebook_task", input_config={"input_path": "path/to/notebook.ipynb", "input_params": ""}, output_config={"output_format": "ipynb"}, wait_for_completion=True, waiter_delay=10, waiter_max_attempts=1440, ) :param task_id: A unique, meaningful id for the task. :param input_config: Configuration for the input file. Input path should be specified as a relative path. The provided relative path will be automatically resolved to an absolute path within the context of the user's home directory in the IDE. Input params should be a dict. Example: {'input_path': 'folder/input/notebook.ipynb', 'input_params':{'key': 'value'}} :param output_config: Configuration for the output format. It should include an output_format parameter to control the format of the notebook execution output. Example: {"output_formats": ["NOTEBOOK"]} :param compute: compute configuration to use for the artifact execution. This is a required attribute if the execution is on a remote compute. Example: { "InstanceType": "ml.m5.large", "VolumeSizeInGB": 30, "VolumeKmsKeyId": "", "ImageUri": "string", "ContainerEntrypoint": [ "string" ]} :param termination_condition: conditions to match to terminate the remote execution. Example: { "MaxRuntimeInSeconds": 3600 } :param tags: tags to be associated with the remote execution runs. Example: { "md_analytics": "logs" } :param wait_for_completion: Indicates whether to wait for the notebook execution to complete. If True, wait for completion; if False, don't wait. :param waiter_delay: Interval in seconds to check the notebook execution status. :param waiter_max_attempts: Number of attempts to wait before returning FAILED. 
:param deferrable: If True, the operator will wait asynchronously for the job to complete. This implies waiting for completion. This mode requires aiobotocore module to be installed. (default: False) .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:SageMakerNotebookOperator` """ operator_extra_links = (SageMakerUnifiedStudioLink(),) def __init__( self, task_id: str, input_config: dict, output_config: dict | None = None, compute: dict | None = None, termination_condition: dict | None = None, tags: dict | None = None, wait_for_completion: bool = True, waiter_delay: int = 10, waiter_max_attempts: int = 1440, deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), **kwargs, ): super().__init__(task_id=task_id, **kwargs) self.execution_name = task_id self.input_config = input_config self.output_config = output_config or {"output_formats": ["NOTEBOOK"]} self.compute = compute or {} self.termination_condition = termination_condition or {} self.tags = tags or {} self.wait_for_completion = wait_for_completion self.waiter_delay = waiter_delay self.waiter_max_attempts = waiter_max_attempts self.deferrable = deferrable self.input_kwargs = kwargs @cached_property def notebook_execution_hook(self): if not self.input_config: raise AirflowException("input_config is required") if "input_path" not in self.input_config: raise AirflowException("input_path is a required field in the input_config") return SageMakerNotebookHook( input_config=self.input_config, output_config=self.output_config, execution_name=self.execution_name, compute=self.compute, termination_condition=self.termination_condition, tags=self.tags, waiter_delay=self.waiter_delay, waiter_max_attempts=self.waiter_max_attempts, ) def execute(self, context: Context): notebook_execution = self.notebook_execution_hook.start_notebook_execution() execution_id = notebook_execution["execution_id"] if self.deferrable: self.defer( 
trigger=SageMakerNotebookJobTrigger( execution_id=execution_id, execution_name=self.execution_name, waiter_delay=self.waiter_delay, waiter_max_attempts=self.waiter_max_attempts, ), method_name="execute_complete", ) elif self.wait_for_completion: response = self.notebook_execution_hook.wait_for_execution_completion(execution_id, context) status = response["Status"] log_info_message = ( f"Notebook Execution: {self.execution_name} Status: {status}. Run Id: {execution_id}" ) self.log.info(log_info_message)
SageMakerNotebookOperator
python
apache__airflow
providers/standard/tests/unit/standard/operators/test_python.py
{ "start": 49140, "end": 68513 }
class ____(BaseTestPythonVirtualenvOperator): opcls = PythonVirtualenvOperator @staticmethod def default_kwargs(*, python_version=DEFAULT_PYTHON_VERSION, **kwargs): kwargs["python_version"] = python_version if "do_not_use_caching" in kwargs: kwargs.pop("do_not_use_caching") else: # Caching by default makes the tests run faster except few cases we want to test with regular venv if "venv_cache_path" not in kwargs: kwargs["venv_cache_path"] = venv_cache_path return kwargs @CLOUDPICKLE_MARKER def test_add_cloudpickle(self): def f(): """Ensure cloudpickle is correctly installed.""" import cloudpickle # noqa: F401 self.run_as_task(f, serializer="cloudpickle", system_site_packages=False) @DILL_MARKER def test_add_dill(self): def f(): """Ensure dill is correctly installed.""" import dill # noqa: F401 self.run_as_task(f, serializer="dill", system_site_packages=False) def test_invalid_serializer(self): def f(): """Ensure dill is correctly installed.""" import dill # noqa: F401 with pytest.raises(AirflowException, match="Unsupported serializer 'airflow'"): self.run_as_task(f, serializer="airflow") def test_no_requirements(self): """Tests that the python callable is invoked on task run.""" def f(): pass self.run_as_task(f) @pytest.mark.parametrize( ("serializer", "extra_requirements"), [ pytest.param("pickle", [], id="pickle"), pytest.param("dill", ["dill"], marks=DILL_MARKER, id="dill"), pytest.param("cloudpickle", ["cloudpickle"], marks=CLOUDPICKLE_MARKER, id="cloudpickle"), pytest.param(None, [], id="default"), ], ) def test_no_system_site_packages(self, serializer, extra_requirements): def f(): try: import funcsigs # noqa: F401 except ImportError: return True raise RuntimeError self.run_as_task( f, system_site_packages=False, requirements=extra_requirements, serializer=serializer ) def test_system_site_packages(self): def f(): import funcsigs # noqa: F401 self.run_as_task(f, requirements=["funcsigs"], system_site_packages=True) def test_with_requirements_pinned(self): def 
f(): import funcsigs if funcsigs.__version__ != "0.4": raise RuntimeError self.run_as_task(f, requirements=["funcsigs==0.4"]) def test_with_no_caching(self): """ Most of venv tests use caching to speed up the tests. This test ensures that we have test without caching as well. :return: """ def f(): import funcsigs if funcsigs.__version__ != "0.4": raise RuntimeError self.run_as_task(f, requirements=["funcsigs==0.4"], do_not_use_caching=True) @pytest.mark.parametrize( ("serializer", "extra_requirements"), [ pytest.param("pickle", [], id="pickle"), pytest.param("dill", ["dill"], marks=DILL_MARKER, id="dill"), pytest.param("cloudpickle", ["cloudpickle"], marks=CLOUDPICKLE_MARKER, id="cloudpickle"), pytest.param(None, [], id="default"), ], ) def test_unpinned_requirements(self, serializer, extra_requirements): def f(): import funcsigs # noqa: F401 self.run_as_task( f, requirements=["funcsigs", *extra_requirements], system_site_packages=False, serializer=serializer, ) @pytest.mark.parametrize( ("serializer", "extra_requirements"), [ pytest.param("pickle", [], id="pickle"), pytest.param("dill", ["dill"], marks=DILL_MARKER, id="dill"), pytest.param("cloudpickle", ["cloudpickle"], marks=CLOUDPICKLE_MARKER, id="cloudpickle"), pytest.param(None, [], id="default"), ], ) def test_range_requirements(self, serializer, extra_requirements): def f(): import funcsigs # noqa: F401 self.run_as_task( f, requirements=["funcsigs>1.0", *extra_requirements], system_site_packages=False, serializer=serializer, ) def test_requirements_file(self): def f(): import funcsigs # noqa: F401 self.run_as_operator(f, requirements="requirements.txt", system_site_packages=False) @mock.patch("airflow.providers.standard.operators.python.prepare_virtualenv") def test_pip_install_options(self, mocked_prepare_virtualenv): def f(): import funcsigs # noqa: F401 mocked_prepare_virtualenv.side_effect = prepare_virtualenv self.run_as_task( f, requirements=["funcsigs==0.4"], system_site_packages=False, 
pip_install_options=["--no-deps"], ) mocked_prepare_virtualenv.assert_called_with( index_urls=None, venv_directory=mock.ANY, python_bin=mock.ANY, system_site_packages=False, requirements_file_path=mock.ANY, pip_install_options=["--no-deps"], ) @pytest.mark.parametrize( "serializer", [ pytest.param("pickle", id="pickle"), pytest.param("dill", marks=DILL_MARKER, id="dill"), pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"), pytest.param(None, id="default"), ], ) def test_templated_requirements_file(self, serializer): def f(): import funcsigs assert funcsigs.__version__ == "1.0.2" self.run_as_operator( f, requirements="requirements.txt", serializer=serializer, params={"environ": "templated_unit_test"}, system_site_packages=False, ) @pytest.mark.parametrize( ("serializer", "extra_requirements"), [ pytest.param("pickle", [], id="pickle"), pytest.param("dill", ["dill"], marks=DILL_MARKER, id="dill"), pytest.param("cloudpickle", ["cloudpickle"], marks=CLOUDPICKLE_MARKER, id="cloudpickle"), pytest.param(None, [], id="default"), ], ) def test_python_3_serializers(self, serializer, extra_requirements): def f(): import sys print(sys.version) try: {}.iteritems() except AttributeError: return raise RuntimeError self.run_as_task(f, python_version="3", serializer=serializer, requirements=extra_requirements) def test_with_default(self): def f(a): return a self.run_as_task(f, system_site_packages=False, op_args=[4]) @mock.patch( "airflow.providers.standard.utils.python_virtualenv._execute_in_subprocess", wraps=_execute_in_subprocess, ) def test_with_index_urls(self, wrapped_execute_in_subprocess): def f(a): import sys from pathlib import Path pip_conf = (Path(sys.executable).parents[1] / "pip.conf").read_text() assert "first.package.index" in pip_conf assert "second.package.index" in pip_conf assert "third.package.index" in pip_conf return a self.run_as_task( f, index_urls=[ "https://first.package.index", "http://second.package.index", 
"http://third.package.index", ], op_args=[4], ) # first call creates venv, second call installs packages package_install_call_args = wrapped_execute_in_subprocess.call_args[1] assert package_install_call_args["env"]["UV_DEFAULT_INDEX"] == "https://first.package.index" assert ( package_install_call_args["env"]["UV_INDEX"] == "http://second.package.index http://third.package.index" ) def test_with_index_url_from_connection(self, monkeypatch): class MockConnection(Connection): """Mock for the Connection class.""" def __init__(self, host: str | None, login: str | None, password: str | None): super().__init__() self.host = host self.login = login self.password = password monkeypatch.setattr( "airflow.providers.standard.hooks.package_index.PackageIndexHook.get_connection", lambda *_: MockConnection("https://my.package.index", "my_username", "my_password"), ) def f(a): import sys from pathlib import Path pip_conf = (Path(sys.executable).parents[1] / "pip.conf").read_text() assert "abc.def.de" in pip_conf assert "https://my_username:my_password@my.package.index" in pip_conf return a self.run_as_task( f, index_urls=["https://abc.def.de"], index_urls_from_connection_ids=["my_connection"], op_args=[4], ) def test_caching(self): def f(a): import sys assert "pytest_venv_1234" in sys.executable return a with TemporaryDirectory(prefix="pytest_venv_1234") as tmp_dir: self.run_as_task(f, venv_cache_path=tmp_dir, op_args=[4]) # This tests might take longer than default 60 seconds as it is serializing a lot of # context using dill/cloudpickle (which is slow apparently). @pytest.mark.execution_timeout(120) @pytest.mark.parametrize( "serializer", [ pytest.param( "dill", marks=[ DILL_MARKER, pytest.mark.xfail( sys.version_info[:2] == (3, 11), reason=( "Also this test is failed on Python 3.11 because of impact of " "regression in Python 3.11 connected likely with CodeType behaviour " "https://github.com/python/cpython/issues/100316. 
" "That likely causes that dill is not able to serialize the `conf` correctly. " "Issue about fixing it is captured in https://github.com/apache/airflow/issues/35307" ), ), ], id="dill", ), pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"), ], ) @pytest.mark.skipif( os.environ.get("PYTEST_PLAIN_ASSERTS") != "true", reason="assertion rewriting breaks this test because serializer will try to serialize " "AssertRewritingHook including captured stdout and we need to run " "it with `--assert=plain` pytest option and PYTEST_PLAIN_ASSERTS=true .", ) def test_airflow_context(self, serializer): def f( # basic ds_nodash, inlets, outlets, params, run_id, task_instance_key_str, ts, ts_nodash, ts_nodash_with_tz, # pendulum-specific logical_date, prev_start_date_success, prev_end_date_success, # airflow-specific macros, conf, dag, dag_run, task, # other **context, ): pass self.run_as_operator(f, serializer=serializer, system_site_packages=True, requirements=None) @pytest.mark.parametrize( "serializer", [ pytest.param("dill", marks=DILL_MARKER, id="dill"), pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"), ], ) def test_pendulum_context(self, serializer): def f( # basic ds_nodash, inlets, outlets, run_id, task_instance_key_str, ts, ts_nodash, ts_nodash_with_tz, # pendulum-specific logical_date, prev_start_date_success, prev_end_date_success, # other **context, ): pass self.run_as_task(f, serializer=serializer, system_site_packages=False, requirements=["pendulum"]) @pytest.mark.parametrize( "serializer", [ pytest.param("pickle", id="pickle"), pytest.param("dill", marks=DILL_MARKER, id="dill"), pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"), pytest.param(None, id="default"), ], ) def test_base_context(self, serializer): def f( # basic ds_nodash, inlets, outlets, run_id, task_instance_key_str, ts, ts_nodash, ts_nodash_with_tz, # other **context, ): pass self.run_as_task(f, serializer=serializer, 
system_site_packages=False, requirements=None) @pytest.mark.parametrize( ("requirements", "system_site", "want_airflow", "want_pendulum"), [ # nothing → just base keys ([], False, False, False), # site-packages → base keys + pendulum keys ([], True, True, True), # apache-airflow / no version constraint (["apache-airflow"], False, True, True), # specific version (["apache-airflow==2.10.2"], False, True, True), # minimum version (["apache-airflow>=2.10"], False, True, True), # pendulum / no version constraint (["pendulum"], False, False, True), # compatible release (["pendulum~=2.1.0"], False, False, True), # other package (["foo==1.0.0"], False, False, False), # with other package (["apache-airflow", "foo"], False, True, True), # full-line comment only (["# comment"], False, False, False), # inline comment after requirement (["apache-airflow==2.10.2 # comment"], False, True, True), # blank line + requirement (["", "pendulum"], False, False, True), # indented comment + requirement ([" # comment", "pendulum~=2.1.0"], False, False, True), # requirements passed as multi-line strings ("funcsigs==0.4\nattrs==23.1.0", False, False, False), (["funcsigs==0.4\nattrs==23.1.0"], False, False, False), ("pendulum==2.1.2 # pinned version\nattrs==23.1.0 # optional", False, False, True), ], ) def test_iter_serializable_context_keys(self, requirements, system_site, want_airflow, want_pendulum): def func(): return "test_return_value" op = PythonVirtualenvOperator( task_id="task", python_callable=func, requirements=requirements, system_site_packages=system_site, ) keys = set(op._iter_serializable_context_keys()) base_keys = set(op.BASE_SERIALIZABLE_CONTEXT_KEYS) airflow_keys = set(op.AIRFLOW_SERIALIZABLE_CONTEXT_KEYS) pendulum_keys = set(op.PENDULUM_SERIALIZABLE_CONTEXT_KEYS) # BASE keys always present assert base_keys <= keys # AIRFLOW keys only when expected if want_airflow: assert airflow_keys <= keys, f"expected AIRFLOW keys for requirements: {requirements}" else: assert not 
(airflow_keys & keys), f"unexpected AIRFLOW keys for requirements: {requirements}" # PENDULUM keys only when expected if want_pendulum: assert pendulum_keys <= keys, f"expected PENDULUM keys for requirements: {requirements}" else: assert not (pendulum_keys & keys), f"unexpected PENDULUM keys for requirements: {requirements}" @pytest.mark.parametrize( "invalid_requirement", [ # invalid version format "pendulum==3..0", # invalid operator (=< instead of <=) "apache-airflow=<2.0", # same invalid operator on pendulum "pendulum=<3.0", # totally malformed "invalid requirement", ], ) def test_iter_serializable_context_keys_invalid_requirement(self, invalid_requirement): def func(): return "test_return_value" op = PythonVirtualenvOperator( task_id="task", python_callable=func, requirements=[invalid_requirement], system_site_packages=False, ) with pytest.raises(ValueError, match=rf"Invalid requirement '{invalid_requirement}'"): # Consume the generator to trigger parsing list(op._iter_serializable_context_keys()) @mock.patch("airflow.providers.standard.operators.python.PythonVirtualenvOperator._prepare_venv") @mock.patch( "airflow.providers.standard.operators.python.PythonVirtualenvOperator._execute_python_callable_in_subprocess" ) @mock.patch( "airflow.providers.standard.operators.python.PythonVirtualenvOperator._cleanup_python_pycache_dir" ) def test_execute_callable_pycache_cleanup( self, pycache_cleanup_mock, execute_in_subprocess_mock, prepare_venv_mock ): custom_pycache_prefix = "custom/__pycache__" tempdir_name = "tmp" venv_dir_temp_name = "dummy12345/venvrandom" venv_path_tmp = f"/{tempdir_name}/{venv_dir_temp_name}" expected_cleanup_path = Path.cwd() / custom_pycache_prefix / tempdir_name / venv_dir_temp_name def f(): return 1 op = PythonVirtualenvOperator( task_id="task", python_callable=f, system_site_packages=False, ) with mock.patch.object(sys, "pycache_prefix", new=custom_pycache_prefix): with mock.patch( 
"airflow.providers.standard.operators.python.TemporaryDirectory" ) as mock_temp_dir: mock_context = mock_temp_dir.return_value.__enter__ mock_context.return_value = venv_path_tmp op.execute_callable() execute_in_subprocess_mock.assert_called_once() prepare_venv_mock.assert_called_once_with(Path(venv_path_tmp)) pycache_cleanup_mock.assert_called_once_with(expected_cleanup_path) # when venv tests are run in parallel to other test they create new processes and this might take # quite some time in shared docker environment and get some contention even between different containers # therefore we have to extend timeouts for those tests @pytest.mark.execution_timeout(120) @pytest.mark.external_python_operator
TestPythonVirtualenvOperator
python
walkccc__LeetCode
solutions/3109. Find the Index of Permutation/3109.py
{ "start": 421, "end": 955 }
class ____:
  def getPermutationIndex(self, perm: list[int]) -> int:
    """Return the 0-based lexicographic rank of `perm`, modulo 1e9+7.

    For each position, count how many still-unused values smaller than the
    placed one could have gone there; each such choice accounts for
    (n - 1 - i)! complete permutations that sort strictly earlier.
    """
    kMod = 1_000_000_007
    size = len(perm)
    # factorial[k] = k! (mod kMod), built bottom-up.
    factorial = [1] * (size + 1)
    for k in range(2, size + 1):
      factorial[k] = factorial[k - 1] * k % kMod
    used = FenwickTree(size)  # marks values already consumed by earlier positions
    rank = 0
    for pos, value in enumerate(perm):
      # Unused values strictly below `value`: all of them minus the used ones.
      smallerUnused = (value - 1) - used.get(value - 1)
      rank = (rank + smallerUnused * factorial[size - 1 - pos]) % kMod
      used.add(value, 1)
    return rank
Solution
python
psf__black
src/blib2to3/pgen2/pgen.py
{ "start": 13135, "end": 13502 }
class ____: arcs: list[tuple[str | None, "NFAState"]] def __init__(self) -> None: self.arcs = [] # list of (label, NFAState) pairs def addarc(self, next: "NFAState", label: str | None = None) -> None: assert label is None or isinstance(label, str) assert isinstance(next, NFAState) self.arcs.append((label, next))
NFAState
python
huggingface__transformers
src/transformers/models/internvl/modular_internvl.py
{ "start": 2678, "end": 4837 }
class ____(JanusVisionAttention):
    """Vision self-attention for InternVL, derived from the Janus attention block.

    Differences from the parent visible here: the grouped-KV attribute is
    removed, attention is never causal, and optional RMS-norm is applied to the
    query/key projections when ``config.use_qk_norm`` is set.
    """

    def __init__(self, config: InternVLVisionConfig):
        super().__init__(config)
        # Vision attention has no grouped key/value heads; drop the inherited attribute.
        del self.num_key_value_groups

        # Needed for flash attention
        self.is_causal = False
        qk_norm = config.use_qk_norm
        # RMS-normalize the Q/K projections when enabled; identity pass-through otherwise.
        self.q_norm = InternVLVisionRMSNorm(self.embed_dim) if qk_norm else nn.Identity()
        self.k_norm = InternVLVisionRMSNorm(self.embed_dim) if qk_norm else nn.Identity()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ):
        """Run bidirectional self-attention over ``hidden_states``.

        Returns:
            Tuple of (attention output, attention weights).
        """
        batch_size, seq_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # Optional RMS-norm on queries/keys (identity when use_qk_norm is off).
        query_states = self.q_norm(query_states)
        key_states = self.k_norm(key_states)

        # Reshape to (batch, heads, seq, head_dim) for the attention kernel.
        query_states = query_states.reshape(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.reshape(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)

        # Dispatch to the configured attention implementation; eager is the default.
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            # Dropout is active only in training mode.
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scale,
            is_causal=False,
            **kwargs,
        )
        attn_output = attn_output.reshape(batch_size, seq_len, self.embed_dim)

        output = self.projection_layer(attn_output)
        output = self.projection_dropout(output)
        return output, attn_weights


@dataclass
@auto_docstring(
    custom_intro="""
    Class for outputs of [`InternVLVisionModel`].
    """
)
InternVLVisionAttention
python
pdm-project__pdm
tests/fixtures/projects/test-plugin-pdm/hello.py
{ "start": 48, "end": 380 }
class ____(BaseCommand):
    """Say hello to somebody"""

    # NOTE: the class docstring above doubles as the command's help text,
    # so it is kept verbatim.

    def add_arguments(self, parser):
        # Single optional flag naming the greeting target.
        parser.add_argument("-n", "--name", help="the person's name")

    def handle(self, project, options):
        # Fall back to "world" when no name was supplied on the CLI.
        who = options.name or 'world'
        print(f"Hello, {who}")


def main(core):
    # Plugin entry point: register the command under the name "hello".
    core.register_command(HelloCommand, "hello")
HelloCommand
python
google__jax
tests/package_structure_test.py
{ "start": 958, "end": 3159 }
class ____(jtu.JaxTestCase):
  """Checks that names exported from public JAX modules report that public
  module path as their ``__module__``."""

  @parameterized.parameters([
      # TODO(jakevdp): expand test to other public modules.
      _mod("jax.errors", exclude=["JaxRuntimeError"]),
      _mod(
          "jax.numpy",
          exclude=[
              "array_repr",
              "array_str",
              "can_cast",
              "character",
              "complexfloating",
              "dtype",
              "iinfo",
              "index_exp",
              "inexact",
              "integer",
              "iterable",
              "finfo",
              "flexible",
              "floating",
              "generic",
              "get_printoptions",
              "ndarray",
              "ndim",
              "number",
              "object_",
              "printoptions",
              "save",
              "savez",
              "set_printoptions",
              "shape",
              "signedinteger",
              "size",
              "s_",
              "unsignedinteger",
              "ComplexWarning",
          ],
      ),
      _mod("jax.numpy.linalg"),
      _mod("jax.nn.initializers"),
      _mod(
          "jax.tree_util",
          exclude=[
              "PyTreeDef",
              "default_registry",
              "KeyEntry",
              "KeyPath",
              "DictKey",
              "GetAttrKey",
              "SequenceKey",
              "FlattenedIndexKey",
          ],
      ),
  ])
  def test_exported_names_match_module(self, module_name, include, exclude):
    """Test that all public exports have __module__ set correctly."""
    module = importlib.import_module(module_name)
    self.assertEqual(module.__name__, module_name)
    for name in dir(module):
      # Skip private names and excluded ones, unless explicitly whitelisted.
      if name not in include and (name.startswith('_') or name in exclude):
        continue
      obj = getattr(module, name)
      if obj is None or isinstance(obj, (bool, int, float, complex, types.ModuleType)):
        # No __module__ attribute expected.
        continue
      self.assertEqual(obj.__module__, module_name,
                       f"{obj} has {obj.__module__=}, expected {module_name}")


if __name__ == '__main__':
  absltest.main(testLoader=jtu.JaxTestLoader())
PackageStructureTest
python
apache__airflow
providers/ftp/tests/unit/ftp/operators/test_ftp.py
{ "start": 1271, "end": 8095 }
class ____:
    """Unit tests for FTPFileTransmitOperator PUT/GET transfers.

    All FTPHook network calls are mocked; assertions check only how the
    operator drives the hook.
    """

    def setup_method(self):
        # Shared path fixtures; the *_int_dir variants exercise
        # intermediate-directory handling.
        self.test_local_dir = "ftptmp"
        self.test_remote_dir = "/ftphome"
        self.test_remote_dir_int = "/ftphome/interdir"
        self.test_local_filename = "test_local_file"
        self.test_remote_filename = "test_remote_file"
        self.test_local_filepath = f"{self.test_local_dir}/{self.test_local_filename}"
        self.test_remote_filepath = f"{self.test_remote_dir}/{self.test_remote_filename}"
        self.test_local_filepath_int_dir = f"{self.test_local_dir}/{self.test_local_filename}"
        self.test_remote_filepath_int_dir = f"{self.test_remote_dir_int}/{self.test_remote_filename}"

    @mock.patch("airflow.providers.ftp.operators.ftp.FTPHook.store_file")
    @mock.patch("airflow.providers.ftp.operators.ftp.FTPHook.create_directory")
    def test_file_transfer_put(self, mock_create_dir, mock_put):
        # Plain PUT: no directory creation expected.
        ftp_op = FTPFileTransmitOperator(
            task_id="test_ftp_put",
            ftp_conn_id=DEFAULT_CONN_ID,
            local_filepath=self.test_local_filepath,
            remote_filepath=self.test_remote_filepath,
            operation=FTPOperation.PUT,
        )
        ftp_op.execute(None)
        assert not mock_create_dir.called
        mock_put.assert_called_once_with(self.test_remote_filepath, self.test_local_filepath)

    @mock.patch("airflow.providers.ftp.operators.ftp.FTPHook.store_file")
    @mock.patch("airflow.providers.ftp.operators.ftp.FTPHook.create_directory")
    def test_file_transfer_with_intermediate_dir_put(self, mock_create_dir, mock_put):
        # PUT with create_intermediate_dirs: the remote parent dir is created first.
        ftp_op = FTPFileTransmitOperator(
            task_id="test_ftp_put_imm_dirs",
            ftp_conn_id=DEFAULT_CONN_ID,
            local_filepath=self.test_local_filepath,
            remote_filepath=self.test_remote_filepath_int_dir,
            operation=FTPOperation.PUT,
            create_intermediate_dirs=True,
        )
        ftp_op.execute(None)
        mock_create_dir.assert_called_with(self.test_remote_dir_int)
        mock_put.assert_called_once_with(self.test_remote_filepath_int_dir, self.test_local_filepath)

    @mock.patch("airflow.providers.ftp.operators.ftp.FTPHook.retrieve_file")
    def test_file_transfer_get(self, mock_get):
        ftp_op = FTPFileTransmitOperator(
            task_id="test_ftp_get",
            ftp_conn_id=DEFAULT_CONN_ID,
            local_filepath=self.test_local_filepath,
            remote_filepath=self.test_remote_filepath,
            operation=FTPOperation.GET,
        )
        ftp_op.execute(None)
        mock_get.assert_called_once_with(self.test_remote_filepath, self.test_local_filepath)

    @mock.patch("airflow.providers.ftp.operators.ftp.FTPHook.retrieve_file")
    def test_file_transfer_with_intermediate_dir_get(self, mock_get, tmp_path):
        # GET with create_intermediate_dirs: the local parent dir is created.
        ftp_op = FTPFileTransmitOperator(
            task_id="test_ftp_get_imm_dirs",
            ftp_conn_id=DEFAULT_CONN_ID,
            local_filepath=str(tmp_path / self.test_local_filepath_int_dir),
            remote_filepath=self.test_remote_filepath,
            operation=FTPOperation.GET,
            create_intermediate_dirs=True,
        )
        ftp_op.execute(None)
        assert len(list(tmp_path.iterdir())) == 1
        mock_get.assert_called_once_with(
            self.test_remote_filepath, str(tmp_path / self.test_local_filepath_int_dir)
        )

    @mock.patch("airflow.providers.ftp.operators.ftp.FTPHook.retrieve_file")
    def test_multiple_paths_get(self, mock_get):
        # List-valued paths: one hook call per local/remote pair, in order.
        local_filepath = ["/tmp/ltest1", "/tmp/ltest2"]
        remote_filepath = ["/tmp/rtest1", "/tmp/rtest2"]
        ftp_op = FTPFileTransmitOperator(
            task_id="test_multiple_paths_get",
            ftp_conn_id=DEFAULT_CONN_ID,
            local_filepath=local_filepath,
            remote_filepath=remote_filepath,
            operation=FTPOperation.GET,
        )
        ftp_op.execute(None)
        assert mock_get.call_count == 2
        for count, (args, _) in enumerate(mock_get.call_args_list):
            assert args == (remote_filepath[count], local_filepath[count])

    @mock.patch("airflow.providers.ftp.operators.ftp.FTPHook.store_file")
    def test_multiple_paths_put(self, mock_put):
        local_filepath = ["/tmp/ltest1", "/tmp/ltest2"]
        remote_filepath = ["/tmp/rtest1", "/tmp/rtest2"]
        ftp_op = FTPFileTransmitOperator(
            task_id="test_multiple_paths_put",
            ftp_conn_id=DEFAULT_CONN_ID,
            local_filepath=local_filepath,
            remote_filepath=remote_filepath,
            operation=FTPOperation.PUT,
        )
        ftp_op.execute(None)
        assert mock_put.call_count == 2
        for count, (args, _) in enumerate(mock_put.call_args_list):
            assert args == (remote_filepath[count], local_filepath[count])

    @mock.patch("airflow.providers.ftp.operators.ftp.FTPHook.store_file")
    def test_arg_checking(self, mock_put):
        dag = DAG(
            dag_id="unit_tests_ftp_op_arg_checking",
            schedule=None,
            default_args={"start_date": DEFAULT_DATE},
        )
        # If ftp_conn_id is not passed in, it should be assigned the default connection id
        task_0 = FTPFileTransmitOperator(
            task_id="test_ftp_args_0",
            local_filepath=self.test_local_filepath,
            remote_filepath=self.test_remote_filepath,
            operation=FTPOperation.PUT,
            dag=dag,
        )
        task_0.execute(None)
        assert task_0.ftp_conn_id == DEFAULT_CONN_ID

        # Exception should be raised if operation is invalid
        task_1 = FTPFileTransmitOperator(
            task_id="test_ftp_args_1",
            ftp_conn_id=DEFAULT_CONN_ID,
            local_filepath=self.test_local_filepath,
            remote_filepath=self.test_remote_filepath,
            operation="invalid_operation",
            dag=dag,
        )
        with pytest.raises(TypeError, match="Unsupported operation value invalid_operation, "):
            task_1.execute(None)

    def test_unequal_local_remote_file_paths(self):
        # Mismatched list lengths must be rejected, in both directions.
        with pytest.raises(ValueError, match="1 paths in local_filepath != 2 paths in remote_filepath"):
            FTPFileTransmitOperator(
                task_id="test_ftp_unequal_paths",
                ftp_conn_id=DEFAULT_CONN_ID,
                local_filepath="/tmp/test",
                remote_filepath=["/tmp/test1", "/tmp/test2"],
            ).execute(None)

        with pytest.raises(ValueError, match="2 paths in local_filepath != 1 paths in remote_filepath"):
            FTPFileTransmitOperator(
                task_id="test_ftp_unequal_paths",
                ftp_conn_id=DEFAULT_CONN_ID,
                local_filepath=["/tmp/test1", "/tmp/test2"],
                remote_filepath="/tmp/test1",
            ).execute(None)
TestFTPFileTransmitOperator
python
kamyu104__LeetCode-Solutions
Python/path-crossing.py
{ "start": 29, "end": 536 }
class ____(object): def isPathCrossing(self, path): """ :type path: str :rtype: bool """ x = y = 0 lookup = {(0, 0)} for c in path: if c == 'E': x += 1 elif c == 'W': x -= 1 elif c == 'N': y += 1 elif c == 'S': y -= 1 if (x, y) in lookup: return True lookup.add((x, y)) return False
Solution
python
fastai__fastai
fastai/vision/augment.py
{ "start": 41293, "end": 42466 }
class ____(AffineCoordTfm):
    "Apply perspective warping with `magnitude` and `p` on a batch of matrices"
    def __init__(self,
        magnitude:float=0.2, # The default warping magnitude
        p:float=0.5, # Probability of applying warp
        draw_x:float|MutableSequence|Callable=None, # User defined warping magnitude in x
        draw_y:float|MutableSequence|Callable=None, # User defined warping magnitude in y
        size:int|tuple=None, # Output size, duplicated if one value is specified
        mode:str='bilinear', # PyTorch `F.grid_sample` interpolation
        pad_mode=PadMode.Reflection, # A `PadMode`
        batch:bool=False, # Apply identical warp to entire batch
        align_corners:bool=True # PyTorch `F.grid_sample` align_corners
    ):
        store_attr()  # fastai helper: saves all ctor args as same-named attributes
        # The coord function draws the random warp magnitudes and is handed to
        # the parent, which applies it as part of the affine-coord pipeline.
        coord_fs = _WarpCoord(magnitude=magnitude, p=p, draw_x=draw_x, draw_y=draw_y, batch=batch)
        super().__init__(coord_fs=coord_fs, size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners )

# %% ../../nbs/09_vision.augment.ipynb 193
@patch
# Apply `func` in logit space, then map back through sigmoid so the result
# stays in (0, 1); patched onto TensorImage as a method.
def lighting(x: TensorImage, func): return torch.sigmoid(func(logit(x)))

# %% ../../nbs/09_vision.augment.ipynb 199
Warp
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/missingSuper1.py
{ "start": 436, "end": 635 }
class ____(ParentA, ParentB):
    # This should generate an error.
    def __init__(self):
        # NOTE(review): intentionally omits a super().__init__() call — the
        # sample presumably exercises the checker's missing-super diagnostic;
        # confirm against the test harness expectations.
        pass

    # This should generate an error.
    def __init_subclass__(cls) -> None:
        # NOTE(review): likewise omits super().__init_subclass__().
        pass
ChildA
python
openai__openai-python
src/openai/resources/beta/assistants.py
{ "start": 46958, "end": 47597 }
class ____:
    """Streamed-response view over an async assistants resource.

    Each public endpoint method of the wrapped resource is re-exposed here
    wrapped so that its response is streamed rather than eagerly read.
    """

    def __init__(self, assistants: AsyncAssistants) -> None:
        self._assistants = assistants

        self.create = async_to_streamed_response_wrapper(assistants.create)
        self.retrieve = async_to_streamed_response_wrapper(assistants.retrieve)
        self.update = async_to_streamed_response_wrapper(assistants.update)
        self.list = async_to_streamed_response_wrapper(assistants.list)
        self.delete = async_to_streamed_response_wrapper(assistants.delete)
AsyncAssistantsWithStreamingResponse
python
joke2k__faker
faker/providers/date_time/fr_FR/__init__.py
{ "start": 46, "end": 785 }
class ____(DateTimeProvider):
    """French (fr_FR) date/time provider: localized weekday and month names."""

    # Keys match strftime "%w" output (0 = Sunday).
    DAY_NAMES = {
        "0": "dimanche",
        "1": "lundi",
        "2": "mardi",
        "3": "mercredi",
        "4": "jeudi",
        "5": "vendredi",
        "6": "samedi",
    }

    # Keys match zero-padded month numbers as returned by month().
    MONTH_NAMES = {
        "01": "Janvier",
        "02": "Février",
        "03": "Mars",
        "04": "Avril",
        "05": "Mai",
        "06": "Juin",
        "07": "Juillet",
        "08": "Août",
        "09": "Septembre",
        "10": "Octobre",
        "11": "Novembre",
        "12": "Décembre",
    }

    def day_of_week(self) -> str:
        """Return the French name of a randomly drawn date's weekday."""
        return self.DAY_NAMES[self.date("%w")]

    def month_name(self) -> str:
        """Return the French name of a randomly drawn month."""
        return self.MONTH_NAMES[self.month()]
Provider
python
simonw__datasette
datasette/permissions.py
{ "start": 847, "end": 3739 }
class ____(ABC):
    """
    Base class for all resource types.

    Each subclass represents a type of resource (e.g., TableResource, DatabaseResource).
    The class itself carries metadata about the resource type.
    Instances represent specific resources.
    """

    # Class-level metadata (subclasses must define these)
    name: str = None  # e.g., "table", "database", "model"
    parent_class: type["Resource"] | None = None  # e.g., DatabaseResource for tables

    # Instance-level optional extra attributes
    reasons: list[str] | None = None
    include_reasons: bool | None = None

    def __init__(self, parent: str | None = None, child: str | None = None):
        """
        Create a resource instance.

        Args:
            parent: The parent identifier (meaning depends on resource type)
            child: The child identifier (meaning depends on resource type)
        """
        self.parent = parent
        self.child = child
        self._private = None  # Sentinel to track if private was set

    @property
    def private(self) -> bool:
        """
        Whether this resource is private (accessible to actor but not anonymous).

        This property is only available on Resource objects returned from
        allowed_resources() when include_is_private=True is used.

        Raises:
            AttributeError: If accessed without calling include_is_private=True
        """
        # None is the "never set" sentinel; raising AttributeError (rather than
        # returning a default) keeps hasattr()-style probing honest.
        if self._private is None:
            raise AttributeError(
                "The 'private' attribute is only available when using "
                "allowed_resources(..., include_is_private=True)"
            )
        return self._private

    @private.setter
    def private(self, value: bool):
        self._private = value

    @classmethod
    def __init_subclass__(cls):
        """
        Validate resource hierarchy doesn't exceed 2 levels.

        Raises:
            ValueError: If this resource would create a 3-level hierarchy
        """
        super().__init_subclass__()

        if cls.parent_class is None:
            return  # Top of hierarchy, nothing to validate

        # Check if our parent has a parent - that would create 3 levels
        if cls.parent_class.parent_class is not None:
            # We have a parent, and that parent has a parent
            # This creates a 3-level hierarchy, which is not allowed
            raise ValueError(
                f"Resource {cls.__name__} creates a 3-level hierarchy: "
                f"{cls.parent_class.parent_class.__name__} -> {cls.parent_class.__name__} -> {cls.__name__}. "
                f"Maximum 2 levels allowed (parent -> child)."
            )

    @classmethod
    @abstractmethod
    def resources_sql(cls) -> str:
        """
        Return SQL query that returns all resources of this type.

        Must return two columns: parent, child
        """
        pass
Resource
python
apache__airflow
airflow-core/src/airflow/serialization/json_schema.py
{ "start": 1106, "end": 2445 }
class ____(Protocol):
    """
    This class is only used for type checking.

    A workaround for IDEs, mypy, etc. due to the way ``Draft7Validator`` is created.
    They are created or do not inherit from proper classes. Hence, you can not have
    ``type: Draft7Validator``.
    """

    # The JSON schema dict that instances are validated against.
    schema: dict

    def is_valid(self, instance) -> bool:
        """Check if the instance is valid under the current schema."""
        ...

    def validate(self, instance) -> None:
        """Check if the instance is valid under the current schema, raising validation error if not."""
        ...

    def iter_errors(self, instance) -> Iterable[jsonschema.exceptions.ValidationError]:
        """Lazily yield each of the validation errors in the given instance."""
        ...


def load_dag_schema_dict() -> dict:
    """Load & return Json Schema for DAG as Python dict.

    Raises:
        AirflowException: if the packaged ``schema.json`` resource cannot be found.
    """
    schema_file_name = "schema.json"
    schema_file = pkgutil.get_data(__name__, schema_file_name)

    if schema_file is None:
        raise AirflowException(f"Schema file {schema_file_name} does not exist")

    schema = json.loads(schema_file.decode())
    return schema


def load_dag_schema() -> Validator:
    """Load & Validate Json Schema for DAG."""
    # jsonschema is imported lazily so that merely importing this module does
    # not require the dependency.
    import jsonschema

    schema = load_dag_schema_dict()
    return jsonschema.Draft7Validator(schema)
Validator
python
airbytehq__airbyte
airbyte-integrations/connectors/source-shopify/source_shopify/shopify_graphql/bulk/query.py
{ "start": 19496, "end": 20355 }
class ____(Metafield):
    """
    {
        productVariants(query: "updated_at:>='2023-02-07T00:00:00+00:00' AND updated_at:<='2023-12-04T00:00:00+00:00'") {
            edges {
                node {
                    id
                    metafields {
                        edges {
                            node {
                                id
                                namespace
                                value
                                key
                                description
                                createdAt
                                updatedAt
                                type
                            }
                        }
                    }
                }
            }
        }
    }
    """

    # No sort key for this query — presumably because the metafields are
    # nested under their parent productVariants nodes; TODO confirm against
    # the bulk query builder's handling of sort_key=None.
    sort_key = None
    type = MetafieldType.PRODUCT_VARIANTS
MetafieldProductVariant
python
jazzband__django-simple-history
simple_history/tests/tests/utils.py
{ "start": 2126, "end": 3554 }
class ____:
    """Database router that pins HistoricalModelWithHistoryInDifferentDb to
    the OTHER_DB_NAME database for reads, writes and migrations, and forbids
    relations involving it."""

    @staticmethod
    def _history_model():
        # Imported lazily (per call) to avoid circular imports at module load.
        from ..models import HistoricalModelWithHistoryInDifferentDb

        return HistoricalModelWithHistoryInDifferentDb

    def db_for_read(self, model, **hints):
        if model == self._history_model():
            return OTHER_DB_NAME
        return None

    def db_for_write(self, model, **hints):
        if model == self._history_model():
            return OTHER_DB_NAME
        return None

    def allow_relation(self, obj1, obj2, **hints):
        target = self._history_model()
        if isinstance(obj1, target) or isinstance(obj2, target):
            return False
        return None

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        if model_name == self._history_model()._meta.model_name:
            return db == OTHER_DB_NAME
        return None


database_router_override_settings_history_in_diff_db = {
    "DATABASE_ROUTERS": [
        "simple_history.tests.tests.utils.TestModelWithHistoryInDifferentDbRouter"
    ]
}
TestModelWithHistoryInDifferentDbRouter
python
davidhalter__jedi
jedi/inference/base_value.py
{ "start": 13759, "end": 18219 }
class ____:
    """An immutable set of inference values.

    Wraps a ``frozenset`` and forwards most operations element-wise over the
    contained values; unknown attribute access is proxied to every contained
    value and the resulting sets are merged (see ``__getattr__``).
    """

    def __init__(self, iterable):
        self._set = frozenset(iterable)
        for value in iterable:
            # Nesting ValueSets inside a ValueSet is a bug at the call site.
            assert not isinstance(value, ValueSet)

    @classmethod
    def _from_frozen_set(cls, frozenset_):
        # BUGFIX: the first parameter was named `self`, leaving `cls` undefined
        # in the body below, so every call (e.g. via from_sets/__or__/__and__)
        # raised NameError. As a classmethod it must receive the class.
        self = cls.__new__(cls)
        self._set = frozenset_
        return self

    @classmethod
    def from_sets(cls, sets):
        """
        Used to work with an iterable of set.
        """
        aggregated = set()
        for set_ in sets:
            if isinstance(set_, ValueSet):
                aggregated |= set_._set
            else:
                aggregated |= frozenset(set_)
        return cls._from_frozen_set(frozenset(aggregated))

    def __or__(self, other):
        return self._from_frozen_set(self._set | other._set)

    def __and__(self, other):
        return self._from_frozen_set(self._set & other._set)

    def __iter__(self):
        return iter(self._set)

    def __bool__(self):
        return bool(self._set)

    def __len__(self):
        return len(self._set)

    def __repr__(self):
        return 'S{%s}' % (', '.join(str(s) for s in self._set))

    def filter(self, filter_func):
        """Return a new set keeping only values for which filter_func is truthy."""
        return self.__class__(filter(filter_func, self._set))

    def __getattr__(self, name):
        # Proxy: calling an unknown method maps it over all contained values
        # and merges the per-value result sets into one ValueSet.
        def mapper(*args, **kwargs):
            return self.from_sets(
                getattr(value, name)(*args, **kwargs)
                for value in self._set
            )
        return mapper

    def __eq__(self, other):
        return self._set == other._set

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self._set)

    def py__class__(self):
        return ValueSet(c.py__class__() for c in self._set)

    def iterate(self, contextualized_node=None, is_async=False):
        """Iterate all contained values in lockstep, merging each position."""
        from jedi.inference.lazy_value import get_merged_lazy_value
        type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set]
        for lazy_values in zip_longest(*type_iters):
            yield get_merged_lazy_value(
                [l for l in lazy_values if l is not None]
            )

    def execute(self, arguments):
        return ValueSet.from_sets(c.inference_state.execute(c, arguments) for c in self._set)

    def execute_with_values(self, *args, **kwargs):
        return ValueSet.from_sets(c.execute_with_values(*args, **kwargs) for c in self._set)

    def goto(self, *args, **kwargs):
        # goto results are plain lists, so concatenate rather than union.
        return reduce(add, [c.goto(*args, **kwargs) for c in self._set], [])

    def py__getattribute__(self, *args, **kwargs):
        return ValueSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set)

    def get_item(self, *args, **kwargs):
        return ValueSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set)

    def try_merge(self, function_name):
        """Union the results of `function_name` over values that have it."""
        value_set = self.__class__([])
        for c in self._set:
            try:
                method = getattr(c, function_name)
            except AttributeError:
                pass
            else:
                value_set |= method()
        return value_set

    def gather_annotation_classes(self):
        return ValueSet.from_sets([c.gather_annotation_classes() for c in self._set])

    def get_signatures(self):
        return [sig for c in self._set for sig in c.get_signatures()]

    def get_type_hint(self, add_class_info=True):
        """Render a PEP-484-style hint string, folding None into Optional[...]."""
        t = [v.get_type_hint(add_class_info=add_class_info) for v in self._set]
        type_hints = sorted(filter(None, t))
        if len(type_hints) == 1:
            return type_hints[0]

        optional = 'None' in type_hints
        if optional:
            type_hints.remove('None')

        if len(type_hints) == 0:
            return None
        elif len(type_hints) == 1:
            s = type_hints[0]
        else:
            s = 'Union[%s]' % ', '.join(type_hints)
        if optional:
            s = 'Optional[%s]' % s
        return s

    def infer_type_vars(self, value_set):
        # Circular
        from jedi.inference.gradual.annotation import merge_type_var_dicts

        type_var_dict = {}
        for value in self._set:
            merge_type_var_dicts(
                type_var_dict,
                value.infer_type_vars(value_set),
            )
        return type_var_dict


NO_VALUES = ValueSet([])


def iterator_to_value_set(func):
    """Decorator: wrap a generator-returning function so it returns a ValueSet."""
    def wrapper(*args, **kwargs):
        return ValueSet(func(*args, **kwargs))

    return wrapper
ValueSet
python
allegroai__clearml
clearml/backend_api/services/v2_13/tasks.py
{ "start": 223772, "end": 224977 }
class ____(Response):
    """
    Response of tasks.edit_configuration endpoint.

    :param updated: Indicates if the task was updated successfully
    :type updated: int
    """

    # Endpoint routing metadata used by the API client machinery.
    _service = "tasks"
    _action = "edit_configuration"
    _version = "2.13"
    # JSON schema describing the wire format of this response.
    _schema = {
        "definitions": {},
        "properties": {
            "updated": {
                "description": "Indicates if the task was updated successfully",
                "type": ["integer", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
        super(EditConfigurationResponse, self).__init__(**kwargs)
        self.updated = updated

    @schema_property("updated")
    def updated(self) -> Optional[int]:
        return self._property_updated

    @updated.setter
    def updated(self, value: Optional[int]) -> None:
        if value is None:
            self._property_updated = None
            return
        # Coerce whole-number floats (e.g. 1.0 deserialized from JSON) to int
        # before the integer type assertion below.
        if isinstance(value, float) and value.is_integer():
            value = int(value)

        self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value
EditConfigurationResponse
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/links/stackdriver.py
{ "start": 1176, "end": 1433 }
class ____(BaseGoogleLink):
    """Helper class for constructing Stackdriver Notifications Link."""

    # Display name of the link.
    name = "Cloud Monitoring Notifications"
    # Identifier for this link type — presumably the XCom key used by
    # BaseGoogleLink; confirm against the base class.
    key = "stackdriver_notifications"
    # URL template that gets formatted with the stored link parameters.
    format_str = STACKDRIVER_NOTIFICATIONS_LINK
StackdriverNotificationsLink
python
altair-viz__altair
altair/vegalite/v6/schema/channels.py
{ "start": 597506, "end": 627007 }
class ____( FieldChannelMixin, core.FieldOrDatumDefWithConditionMarkPropFieldDefGradientstringnull, ): r""" Stroke schema wrapper. Parameters ---------- shorthand : str, dict, Sequence[str], :class:`RepeatRef` shorthand for field, aggregate, and type aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb'] Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``, ``"min"``, ``"max"``, ``"count"``). **Default value:** ``undefined`` (None) **See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__ documentation. bandPosition : float Relative position on a band of a stacked, binned, time unit, or band scale. For example, the marks will be positioned at the beginning of the band if set to ``0``, and at the middle of the band if set to ``0.5``. bin : bool, dict, :class:`BinParams`, None A flag for binning a ``quantitative`` field, `an object defining binning parameters <https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating that the data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (``"binned"``). * If ``true``, default `binning parameters <https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be applied. * If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are already binned. You can map the bin-start field to ``x`` (or ``y``) and the bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also set the axis's `tickMinStep <https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property. 
**Default value:** ``false`` **See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__ documentation. condition : dict, :class:`ConditionalValueDefGradientstringnullExprRef`, :class:`ConditionalParameterValueDefGradientstringnullExprRef`, :class:`ConditionalPredicateValueDefGradientstringnullExprRef`, Sequence[dict, :class:`ConditionalValueDefGradientstringnullExprRef`, :class:`ConditionalParameterValueDefGradientstringnullExprRef`, :class:`ConditionalPredicateValueDefGradientstringnullExprRef`] One or more value definition(s) with `a parameter or a test predicate <https://vega.github.io/vega-lite/docs/condition.html>`__. **Note:** A field definition's ``condition`` property can only contain `conditional value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__ since Vega-Lite only allows at most one encoded field per encoding channel. field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef` **Required.** A string defining the name of the field from which to pull a data value or an object defining iterated values from the `repeat <https://vega.github.io/vega-lite/docs/repeat.html>`__ operator. **See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__ documentation. **Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If field names contain dots or brackets but are not nested, you can use ``\\`` to escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details about escaping in the `field documentation <https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required if ``aggregate`` is ``count``. legend : dict, :class:`Legend`, None An object defining properties of the legend. If ``null``, the legend for the encoding channel will be removed. 
**Default value:** If undefined, default `legend properties <https://vega.github.io/vega-lite/docs/legend.html>`__ are applied. **See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__ documentation. scale : dict, :class:`Scale`, None An object defining properties of the channel's scale, which is the function that transforms values in the data domain (numbers, dates, strings, etc) to visual values (pixels, colors, sizes) of the encoding channels. If ``null``, the scale will be `disabled and the data value will be directly encoded <https://vega.github.io/vega-lite/docs/scale.html#disable>`__. **Default value:** If undefined, default `scale properties <https://vega.github.io/vega-lite/docs/scale.html>`__ are applied. **See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__ documentation. sort : dict, :class:`Sort`, Sequence[str], Sequence[bool], Sequence[float], :class:`SortArray`, :class:`SortOrder`, :class:`AllSortString`, :class:`SortByChannel`, :class:`SortByEncoding`, :class:`EncodingSortField`, :class:`SortByChannelDesc`, Sequence[dict, :class:`DateTime`], Literal['-x', '-y', '-color', '-fill', '-stroke', '-strokeWidth', '-size', '-shape', '-fillOpacity', '-strokeOpacity', '-opacity', '-text', 'ascending', 'descending', 'x', 'y', 'color', 'fill', 'stroke', 'strokeWidth', 'size', 'shape', 'fillOpacity', 'strokeOpacity', 'opacity', 'text'], None Sort order for the encoded field. For continuous fields (quantitative or temporal), ``sort`` can be either ``"ascending"`` or ``"descending"``. For discrete fields, ``sort`` can be one of the following: * ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in JavaScript. * `A string indicating an encoding channel name to sort by <https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ (e.g., ``"x"`` or ``"y"``) with an optional minus prefix for descending sort (e.g., ``"-x"`` to sort by x-field, descending). 
This channel string is short-form of `a sort-by-encoding definition <https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__. For example, ``"sort": "-x"`` is equivalent to ``"sort": {"encoding": "x", "order": "descending"}``. * `A sort field definition <https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by another field. * `An array specifying the field values in preferred order <https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the sort order will obey the values in the array, followed by any unspecified values in their original order. For discrete time field, values in the sort array can be `date-time definition objects <https://vega.github.io/vega-lite/docs/datetime.html>`__. In addition, for time units ``"month"`` and ``"day"``, the values can be the month or day names (case insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"``). * ``null`` indicating no sort. **Default value:** ``"ascending"`` **Note:** ``null`` and sorting by another channel is not supported for ``row`` and ``column``. **See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__ documentation. 
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds'] Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal field. or `a temporal field that gets casted as ordinal <https://vega.github.io/vega-lite/docs/type.html#cast>`__. **Default value:** ``undefined`` (None) **See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__ documentation. title : str, :class:`Text`, Sequence[str], None A title for the field. If ``null``, the title will be removed. **Default value:** derived from the field's name and transformation function (``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function, the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the field is binned or has a time unit applied, the applied function is shown in parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``). Otherwise, the title is simply the field name. **Notes**: 1) You can customize the default field title format by providing the `fieldTitle <https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle function via the compile function's options <https://vega.github.io/vega-lite/usage/compile.html#field-title>`__. 2) If both field definition's ``title`` and axis, header, or legend ``title`` are defined, axis/header/legend title will be used. 
type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal'] The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or ``"nominal"``) for the encoded field or constant value (``datum``). It can also be a ``"geojson"`` type for encoding `'geoshape' <https://vega.github.io/vega-lite/docs/geoshape.html>`__. Vega-Lite automatically infers data types in many cases as discussed below. However, type is required for a field if: (1) the field is not nominal and the field encoding has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal scale for a field with ``bin`` or ``timeUnit``. **Default value:** 1) For a data ``field``, ``"nominal"`` is the default data type unless the field encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or ``timeUnit`` that satisfies the following criteria: * ``"quantitative"`` is the default type if (1) the encoded field contains ``bin`` or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is ``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__. * ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit`` or (2) the specified scale type is a time or utc scale * ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort order <https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__, (2) the specified scale type is an ordinal/point/band scale, or (3) the encoding channel is ``order``. 
2) For a constant value in data domain (``datum``): * ``"quantitative"`` if the datum is a number * ``"nominal"`` if the datum is a string * ``"temporal"`` if the datum is `a date time object <https://vega.github.io/vega-lite/docs/datetime.html>`__ **Note:** * Data ``type`` describes the semantics of the data rather than the primitive data types (number, string, etc.). The same primitive data type can have different types of measurement. For example, numeric data can represent quantitative, ordinal, or nominal data. * Data values for a temporal field can be either a date-time string (e.g., ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a timestamp number (e.g., ``1552199579097``). * When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the ``type`` property can be either ``"quantitative"`` (for using a linear bin scale) or `"ordinal" (for using an ordinal bin scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__. * When using with `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal" (for using an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__. * When using with `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property refers to the post-aggregation data type. For example, we can calculate count ``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct", "field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``. * Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have ``type`` as they must have exactly the same type as their primary channels (e.g., ``x``, ``y``). **See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__ documentation. 
""" _class_is_valid_at_instantiation = False _encoding_name = "stroke" @overload def aggregate(self, _: NonArgAggregateOp_T, /) -> Stroke: ... @overload def aggregate( self, *, argmax: Optional[str | SchemaBase] = Undefined ) -> Stroke: ... @overload def aggregate( self, *, argmin: Optional[str | SchemaBase] = Undefined ) -> Stroke: ... @overload def bandPosition(self, _: float, /) -> Stroke: ... @overload def bin(self, _: bool | Bin | None, /) -> Stroke: ... @overload def bin( self, *, anchor: Optional[float] = Undefined, base: Optional[float] = Undefined, binned: Optional[bool] = Undefined, divide: Optional[Sequence[float]] = Undefined, extent: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined, maxbins: Optional[float] = Undefined, minstep: Optional[float] = Undefined, nice: Optional[bool] = Undefined, step: Optional[float] = Undefined, steps: Optional[Sequence[float]] = Undefined, ) -> Stroke: ... @overload def condition( self, *, test: Optional[str | SchemaBase | Map] = Undefined, value: Optional[str | Parameter | SchemaBase | Map | None] = Undefined, ) -> Stroke: ... @overload def condition( self, *, empty: Optional[bool] = Undefined, param: Optional[str | SchemaBase] = Undefined, value: Optional[str | Parameter | SchemaBase | Map | None] = Undefined, ) -> Stroke: ... @overload def condition( self, _: list[core.ConditionalValueDefGradientstringnullExprRef], / ) -> Stroke: ... @overload def field(self, _: str | RepeatRef, /) -> Stroke: ... @overload def field( self, *, repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined, ) -> Stroke: ... @overload def legend(self, _: Legend | None, /) -> Stroke: ... 
@overload def legend( self, *, aria: Optional[bool | Parameter | SchemaBase | Map] = Undefined, clipHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined, columnPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined, columns: Optional[float | Parameter | SchemaBase | Map] = Undefined, cornerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined, description: Optional[str | Parameter | SchemaBase | Map] = Undefined, direction: Optional[SchemaBase | Orientation_T] = Undefined, fillColor: Optional[ str | Parameter | SchemaBase | Map | ColorName_T | None ] = Undefined, format: Optional[str | SchemaBase | Map] = Undefined, formatType: Optional[str] = Undefined, gradientLength: Optional[float | Parameter | SchemaBase | Map] = Undefined, gradientOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined, gradientStrokeColor: Optional[ str | Parameter | SchemaBase | Map | ColorName_T | None ] = Undefined, gradientStrokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined, gradientThickness: Optional[float | Parameter | SchemaBase | Map] = Undefined, gridAlign: Optional[Parameter | SchemaBase | Map | LayoutAlign_T] = Undefined, labelAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined, labelBaseline: Optional[ Parameter | SchemaBase | Map | TextBaseline_T ] = Undefined, labelColor: Optional[ str | Parameter | SchemaBase | Map | ColorName_T | None ] = Undefined, labelExpr: Optional[str] = Undefined, labelFont: Optional[str | Parameter | SchemaBase | Map] = Undefined, labelFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined, labelFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined, labelFontWeight: Optional[ Parameter | SchemaBase | Map | FontWeight_T ] = Undefined, labelLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined, labelOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined, labelOpacity: Optional[float | Parameter | SchemaBase | Map] = 
Undefined, labelOverlap: Optional[ bool | Parameter | SchemaBase | Literal["greedy", "parity"] | Map ] = Undefined, labelPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined, labelSeparation: Optional[float | Parameter | SchemaBase | Map] = Undefined, legendX: Optional[float | Parameter | SchemaBase | Map] = Undefined, legendY: Optional[float | Parameter | SchemaBase | Map] = Undefined, offset: Optional[float | Parameter | SchemaBase | Map] = Undefined, orient: Optional[SchemaBase | LegendOrient_T] = Undefined, padding: Optional[float | Parameter | SchemaBase | Map] = Undefined, rowPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined, strokeColor: Optional[ str | Parameter | SchemaBase | Map | ColorName_T | None ] = Undefined, symbolDash: Optional[ Parameter | SchemaBase | Sequence[float] | Map ] = Undefined, symbolDashOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined, symbolFillColor: Optional[ str | Parameter | SchemaBase | Map | ColorName_T | None ] = Undefined, symbolLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined, symbolOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined, symbolOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined, symbolSize: Optional[float | Parameter | SchemaBase | Map] = Undefined, symbolStrokeColor: Optional[ str | Parameter | SchemaBase | Map | ColorName_T | None ] = Undefined, symbolStrokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined, symbolType: Optional[str | Parameter | SchemaBase | Map] = Undefined, tickCount: Optional[ float | Parameter | SchemaBase | Map | TimeInterval_T ] = Undefined, tickMinStep: Optional[float | Parameter | SchemaBase | Map] = Undefined, title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined, titleAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined, titleAnchor: Optional[Parameter | SchemaBase | Map | TitleAnchor_T] = Undefined, titleBaseline: Optional[ Parameter | 
SchemaBase | Map | TextBaseline_T ] = Undefined, titleColor: Optional[ str | Parameter | SchemaBase | Map | ColorName_T | None ] = Undefined, titleFont: Optional[str | Parameter | SchemaBase | Map] = Undefined, titleFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined, titleFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined, titleFontWeight: Optional[ Parameter | SchemaBase | Map | FontWeight_T ] = Undefined, titleLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined, titleLineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined, titleOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined, titleOrient: Optional[Parameter | SchemaBase | Map | Orient_T] = Undefined, titlePadding: Optional[float | Parameter | SchemaBase | Map] = Undefined, type: Optional[Literal["symbol", "gradient"]] = Undefined, values: Optional[ Parameter | SchemaBase | Sequence[str] | Sequence[bool] | Sequence[float] | Sequence[Temporal | SchemaBase | Map] | Map ] = Undefined, zindex: Optional[float] = Undefined, ) -> Stroke: ... @overload def scale(self, _: Scale | None, /) -> Stroke: ... 
@overload def scale( self, *, align: Optional[float | Parameter | SchemaBase | Map] = Undefined, base: Optional[float | Parameter | SchemaBase | Map] = Undefined, bins: Optional[SchemaBase | Sequence[float] | Map] = Undefined, clamp: Optional[bool | Parameter | SchemaBase | Map] = Undefined, constant: Optional[float | Parameter | SchemaBase | Map] = Undefined, domain: Optional[ Parameter | SchemaBase | Literal["unaggregated"] | Sequence[ str | bool | float | Temporal | Parameter | SchemaBase | Map | None ] | Map ] = Undefined, domainMax: Optional[ float | Temporal | Parameter | SchemaBase | Map ] = Undefined, domainMid: Optional[float | Parameter | SchemaBase | Map] = Undefined, domainMin: Optional[ float | Temporal | Parameter | SchemaBase | Map ] = Undefined, domainRaw: Optional[Parameter | SchemaBase | Map] = Undefined, exponent: Optional[float | Parameter | SchemaBase | Map] = Undefined, interpolate: Optional[ Parameter | SchemaBase | Map | ScaleInterpolateEnum_T ] = Undefined, nice: Optional[ bool | float | Parameter | SchemaBase | Map | TimeInterval_T ] = Undefined, padding: Optional[float | Parameter | SchemaBase | Map] = Undefined, paddingInner: Optional[float | Parameter | SchemaBase | Map] = Undefined, paddingOuter: Optional[float | Parameter | SchemaBase | Map] = Undefined, range: Optional[ SchemaBase | Sequence[str | float | Parameter | SchemaBase | Sequence[float] | Map] | Map | RangeEnum_T ] = Undefined, rangeMax: Optional[str | float | Parameter | SchemaBase | Map] = Undefined, rangeMin: Optional[str | float | Parameter | SchemaBase | Map] = Undefined, reverse: Optional[bool | Parameter | SchemaBase | Map] = Undefined, round: Optional[bool | Parameter | SchemaBase | Map] = Undefined, scheme: Optional[Parameter | SchemaBase | Map | ColorScheme_T] = Undefined, type: Optional[SchemaBase | ScaleType_T] = Undefined, zero: Optional[bool | Parameter | SchemaBase | Map] = Undefined, ) -> Stroke: ... 
@overload def sort( self, _: Sequence[str] | Sequence[bool] | Sequence[float] | Sequence[DateTime | Temporal] | AllSortString_T | None, /, ) -> Stroke: ... @overload def sort( self, *, field: Optional[str | SchemaBase | Map] = Undefined, op: Optional[SchemaBase | NonArgAggregateOp_T] = Undefined, order: Optional[SchemaBase | SortOrder_T | None] = Undefined, ) -> Stroke: ... @overload def sort( self, *, encoding: Optional[SchemaBase | SortByChannel_T] = Undefined, order: Optional[SchemaBase | SortOrder_T | None] = Undefined, ) -> Stroke: ... @overload def timeUnit( self, _: TimeUnitParams | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T, /, ) -> Stroke: ... @overload def timeUnit( self, *, binned: Optional[bool] = Undefined, maxbins: Optional[float] = Undefined, step: Optional[float] = Undefined, unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined, utc: Optional[bool] = Undefined, ) -> Stroke: ... @overload def title(self, _: str | Sequence[str] | None, /) -> Stroke: ... @overload def type(self, _: StandardType_T, /) -> Stroke: ... 
def __init__( self, shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined, aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined, bandPosition: Optional[float] = Undefined, bin: Optional[bool | SchemaBase | Map | None] = Undefined, condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined, field: Optional[str | SchemaBase | Map] = Undefined, legend: Optional[SchemaBase | Map | None] = Undefined, scale: Optional[SchemaBase | Map | None] = Undefined, sort: Optional[ SchemaBase | Sequence[str] | Sequence[bool] | Sequence[float] | Sequence[Temporal | SchemaBase | Map] | Map | AllSortString_T | None ] = Undefined, timeUnit: Optional[ SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T ] = Undefined, title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined, type: Optional[SchemaBase | StandardType_T] = Undefined, **kwds, ): super().__init__( shorthand=shorthand, aggregate=aggregate, bandPosition=bandPosition, bin=bin, condition=condition, field=field, legend=legend, scale=scale, sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds, ) @with_property_setters
Stroke
python
matplotlib__matplotlib
lib/matplotlib/tests/test_mlab.py
{ "start": 603, "end": 8329 }
class ____: def setup_method(self): np.random.seed(0) n = 1000 x = np.linspace(0., 100, n) self.sig_zeros = np.zeros(n) self.sig_off = self.sig_zeros + 100. self.sig_slope = np.linspace(-10., 90., n) self.sig_slope_mean = x - x.mean() self.sig_base = ( np.random.standard_normal(n) + np.sin(x*2*np.pi/(n/100))) self.sig_base -= self.sig_base.mean() def allclose(self, *args): assert_allclose(*args, atol=1e-8) def test_detrend_none(self): assert mlab.detrend_none(0.) == 0. assert mlab.detrend_none(0., axis=1) == 0. assert mlab.detrend(0., key="none") == 0. assert mlab.detrend(0., key=mlab.detrend_none) == 0. for sig in [ 5.5, self.sig_off, self.sig_slope, self.sig_base, (self.sig_base + self.sig_slope + self.sig_off).tolist(), np.vstack([self.sig_base, # 2D case. self.sig_base + self.sig_off, self.sig_base + self.sig_slope, self.sig_base + self.sig_off + self.sig_slope]), np.vstack([self.sig_base, # 2D transposed case. self.sig_base + self.sig_off, self.sig_base + self.sig_slope, self.sig_base + self.sig_off + self.sig_slope]).T, ]: if isinstance(sig, np.ndarray): assert_array_equal(mlab.detrend_none(sig), sig) else: assert mlab.detrend_none(sig) == sig def test_detrend_mean(self): for sig in [0., 5.5]: # 0D. assert mlab.detrend_mean(sig) == 0. assert mlab.detrend(sig, key="mean") == 0. assert mlab.detrend(sig, key=mlab.detrend_mean) == 0. # 1D. 
self.allclose(mlab.detrend_mean(self.sig_zeros), self.sig_zeros) self.allclose(mlab.detrend_mean(self.sig_base), self.sig_base) self.allclose(mlab.detrend_mean(self.sig_base + self.sig_off), self.sig_base) self.allclose(mlab.detrend_mean(self.sig_base + self.sig_slope), self.sig_base + self.sig_slope_mean) self.allclose( mlab.detrend_mean(self.sig_base + self.sig_slope + self.sig_off), self.sig_base + self.sig_slope_mean) def test_detrend_mean_1d_base_slope_off_list_andor_axis0(self): input = self.sig_base + self.sig_slope + self.sig_off target = self.sig_base + self.sig_slope_mean self.allclose(mlab.detrend_mean(input, axis=0), target) self.allclose(mlab.detrend_mean(input.tolist()), target) self.allclose(mlab.detrend_mean(input.tolist(), axis=0), target) def test_detrend_mean_2d(self): input = np.vstack([self.sig_off, self.sig_base + self.sig_off]) target = np.vstack([self.sig_zeros, self.sig_base]) self.allclose(mlab.detrend_mean(input), target) self.allclose(mlab.detrend_mean(input, axis=None), target) self.allclose(mlab.detrend_mean(input.T, axis=None).T, target) self.allclose(mlab.detrend(input), target) self.allclose(mlab.detrend(input, axis=None), target) self.allclose( mlab.detrend(input.T, key="constant", axis=None), target.T) input = np.vstack([self.sig_base, self.sig_base + self.sig_off, self.sig_base + self.sig_slope, self.sig_base + self.sig_off + self.sig_slope]) target = np.vstack([self.sig_base, self.sig_base, self.sig_base + self.sig_slope_mean, self.sig_base + self.sig_slope_mean]) self.allclose(mlab.detrend_mean(input.T, axis=0), target.T) self.allclose(mlab.detrend_mean(input, axis=1), target) self.allclose(mlab.detrend_mean(input, axis=-1), target) self.allclose(mlab.detrend(input, key="default", axis=1), target) self.allclose(mlab.detrend(input.T, key="mean", axis=0), target.T) self.allclose( mlab.detrend(input.T, key=mlab.detrend_mean, axis=0), target.T) def test_detrend_ValueError(self): for signal, kwargs in [ (self.sig_slope[np.newaxis], 
{"key": "spam"}), (self.sig_slope[np.newaxis], {"key": 5}), (5.5, {"axis": 0}), (self.sig_slope, {"axis": 1}), (self.sig_slope[np.newaxis], {"axis": 2}), ]: with pytest.raises(ValueError): mlab.detrend(signal, **kwargs) def test_detrend_mean_ValueError(self): for signal, kwargs in [ (5.5, {"axis": 0}), (self.sig_slope, {"axis": 1}), (self.sig_slope[np.newaxis], {"axis": 2}), ]: with pytest.raises(ValueError): mlab.detrend_mean(signal, **kwargs) def test_detrend_linear(self): # 0D. assert mlab.detrend_linear(0.) == 0. assert mlab.detrend_linear(5.5) == 0. assert mlab.detrend(5.5, key="linear") == 0. assert mlab.detrend(5.5, key=mlab.detrend_linear) == 0. for sig in [ # 1D. self.sig_off, self.sig_slope, self.sig_slope + self.sig_off, ]: self.allclose(mlab.detrend_linear(sig), self.sig_zeros) def test_detrend_str_linear_1d(self): input = self.sig_slope + self.sig_off target = self.sig_zeros self.allclose(mlab.detrend(input, key="linear"), target) self.allclose(mlab.detrend(input, key=mlab.detrend_linear), target) self.allclose(mlab.detrend_linear(input.tolist()), target) def test_detrend_linear_2d(self): input = np.vstack([self.sig_off, self.sig_slope, self.sig_slope + self.sig_off]) target = np.vstack([self.sig_zeros, self.sig_zeros, self.sig_zeros]) self.allclose( mlab.detrend(input.T, key="linear", axis=0), target.T) self.allclose( mlab.detrend(input.T, key=mlab.detrend_linear, axis=0), target.T) self.allclose( mlab.detrend(input, key="linear", axis=1), target) self.allclose( mlab.detrend(input, key=mlab.detrend_linear, axis=1), target) with pytest.raises(ValueError): mlab.detrend_linear(self.sig_slope[np.newaxis]) @pytest.mark.parametrize('iscomplex', [False, True], ids=['real', 'complex'], scope='class') @pytest.mark.parametrize('sides', ['onesided', 'twosided', 'default'], scope='class') @pytest.mark.parametrize( 'fstims,len_x,NFFT_density,nover_density,pad_to_density,pad_to_spectrum', [ ([], None, -1, -1, -1, -1), ([4], None, -1, -1, -1, -1), ([4, 5, 10], None, 
-1, -1, -1, -1), ([], None, None, -1, -1, None), ([], None, -1, -1, None, None), ([], None, None, -1, None, None), ([], 1024, 512, -1, -1, 128), ([], 256, -1, -1, 33, 257), ([], 255, 33, -1, -1, None), ([], 256, 128, -1, 256, 256), ([], None, -1, 32, -1, -1), ], ids=[ 'nosig', 'Fs4', 'FsAll', 'nosig_noNFFT', 'nosig_nopad_to', 'nosig_noNFFT_no_pad_to', 'nosig_trim', 'nosig_odd', 'nosig_oddlen', 'nosig_stretch', 'nosig_overlap', ], scope='class')
TestDetrend
python
django__django
tests/model_enums/tests.py
{ "start": 630, "end": 756 }
class ____(models.IntegerChoices): CAR = 1, "Carriage" TRUCK = 2 JET_SKI = 3 __empty__ = _("(Unknown)")
Vehicle
python
scipy__scipy
scipy/optimize/_differentiable_functions.py
{ "start": 29316, "end": 29960 }
class ____(LinearVectorFunction): """Identity vector function and its derivatives. The Jacobian is the identity matrix, returned as a dense array when `sparse_jacobian=False` and as a csr matrix otherwise. The Hessian is identically zero and it is returned as a csr matrix. """ def __init__(self, x0, sparse_jacobian): n = len(x0) if sparse_jacobian or sparse_jacobian is None: A = sps.eye_array(n, format='csr') sparse_jacobian = True else: A = np.eye(n) sparse_jacobian = False super().__init__(A, x0, sparse_jacobian)
IdentityVectorFunction
python
python__mypy
mypy/patterns.py
{ "start": 3339, "end": 4048 }
class ____(Pattern): """The pattern Cls(...)""" class_ref: RefExpr positionals: list[Pattern] keyword_keys: list[str] keyword_values: list[Pattern] def __init__( self, class_ref: RefExpr, positionals: list[Pattern], keyword_keys: list[str], keyword_values: list[Pattern], ) -> None: super().__init__() assert len(keyword_keys) == len(keyword_values) self.class_ref = class_ref self.positionals = positionals self.keyword_keys = keyword_keys self.keyword_values = keyword_values def accept(self, visitor: PatternVisitor[T]) -> T: return visitor.visit_class_pattern(self)
ClassPattern
python
numpy__numpy
numpy/_core/tests/test_indexing.py
{ "start": 22257, "end": 22549 }
class ____: def test_scalar_return_type(self): # Field access on an array should return an array, even if it # is 0-d. a = np.zeros((), [('a', 'f8')]) assert_(isinstance(a['a'], np.ndarray)) assert_(isinstance(a[['a']], np.ndarray))
TestFieldIndexing
python
apache__airflow
providers/google/tests/unit/google/cloud/triggers/test_dataflow.py
{ "start": 29236, "end": 34348 }
class ____: def test_serialize(self, dataflow_start_yaml_job_trigger): actual_data = dataflow_start_yaml_job_trigger.serialize() expected_data = ( "airflow.providers.google.cloud.triggers.dataflow.DataflowStartYamlJobTrigger", { "project_id": PROJECT_ID, "job_id": JOB_ID, "location": LOCATION, "gcp_conn_id": GCP_CONN_ID, "poll_sleep": POLL_SLEEP, "expected_terminal_state": None, "impersonation_chain": IMPERSONATION_CHAIN, "cancel_timeout": CANCEL_TIMEOUT, }, ) assert actual_data == expected_data @pytest.mark.parametrize( ("attr", "expected"), [ ("gcp_conn_id", GCP_CONN_ID), ("poll_sleep", POLL_SLEEP), ("impersonation_chain", IMPERSONATION_CHAIN), ("cancel_timeout", CANCEL_TIMEOUT), ], ) def test_get_async_hook(self, dataflow_start_yaml_job_trigger, attr, expected): hook = dataflow_start_yaml_job_trigger._get_async_hook() actual = hook._hook_kwargs.get(attr) assert actual is not None assert actual == expected @pytest.mark.asyncio @mock.patch("airflow.providers.google.cloud.hooks.dataflow.AsyncDataflowHook.get_job") async def test_run_loop_return_success_event( self, mock_get_job, dataflow_start_yaml_job_trigger, test_dataflow_batch_job ): mock_get_job.return_value = test_dataflow_batch_job expected_event = TriggerEvent( { "job": Job.to_dict(test_dataflow_batch_job), "status": "success", "message": "Batch job completed.", } ) actual_event = await dataflow_start_yaml_job_trigger.run().asend(None) assert actual_event == expected_event @pytest.mark.asyncio @mock.patch("airflow.providers.google.cloud.hooks.dataflow.AsyncDataflowHook.get_job") async def test_run_loop_return_failed_event( self, mock_get_job, dataflow_start_yaml_job_trigger, test_dataflow_batch_job ): test_dataflow_batch_job.current_state = JobState.JOB_STATE_FAILED mock_get_job.return_value = test_dataflow_batch_job expected_event = TriggerEvent( { "job": Job.to_dict(test_dataflow_batch_job), "status": "error", "message": "Job failed.", } ) actual_event = await 
dataflow_start_yaml_job_trigger.run().asend(None) assert actual_event == expected_event @pytest.mark.asyncio @mock.patch("airflow.providers.google.cloud.hooks.dataflow.AsyncDataflowHook.get_job") async def test_run_loop_return_stopped_event( self, mock_get_job, dataflow_start_yaml_job_trigger, test_dataflow_batch_job ): test_dataflow_batch_job.current_state = JobState.JOB_STATE_STOPPED mock_get_job.return_value = test_dataflow_batch_job expected_event = TriggerEvent( { "job": Job.to_dict(test_dataflow_batch_job), "status": "stopped", "message": "Job was stopped.", } ) actual_event = await dataflow_start_yaml_job_trigger.run().asend(None) assert actual_event == expected_event @pytest.mark.asyncio @mock.patch("airflow.providers.google.cloud.hooks.dataflow.AsyncDataflowHook.get_job") async def test_run_loop_return_expected_state_event( self, mock_get_job, dataflow_start_yaml_job_trigger, test_dataflow_batch_job ): dataflow_start_yaml_job_trigger.expected_terminal_state = DataflowJobStatus.JOB_STATE_RUNNING test_dataflow_batch_job.current_state = JobState.JOB_STATE_RUNNING mock_get_job.return_value = test_dataflow_batch_job expected_event = TriggerEvent( { "job": Job.to_dict(test_dataflow_batch_job), "status": "success", "message": f"Job reached the expected terminal state: {DataflowJobStatus.JOB_STATE_RUNNING}.", } ) actual_event = await dataflow_start_yaml_job_trigger.run().asend(None) assert actual_event == expected_event @pytest.mark.asyncio @mock.patch("airflow.providers.google.cloud.hooks.dataflow.AsyncDataflowHook.get_job") async def test_run_loop_is_still_running( self, mock_get_job, dataflow_start_yaml_job_trigger, test_dataflow_batch_job ): """Test that DataflowStartYamlJobTrigger is still in loop if the job status neither terminal nor expected.""" dataflow_start_yaml_job_trigger.expected_terminal_state = DataflowJobStatus.JOB_STATE_STOPPED test_dataflow_batch_job.current_state = JobState.JOB_STATE_RUNNING mock_get_job.return_value = test_dataflow_batch_job 
task = asyncio.create_task(dataflow_start_yaml_job_trigger.run().__anext__()) await asyncio.sleep(0.5) assert task.done() is False task.cancel()
TestDataflowStartYamlJobTrigger
python
kamyu104__LeetCode-Solutions
Python/minimize-or-of-remaining-elements-using-operations.py
{ "start": 52, "end": 635 }
class ____(object): def minOrAfterOperations(self, nums, k): """ :type nums: List[int] :type k: int :rtype: int """ result = 0 l = max(nums).bit_length() mask = (1<<l)-1 for i in reversed(xrange(l)): result <<= 1 curr, cnt = mask, 0 for x in nums: curr &= x>>i if curr&~result: cnt += 1 else: curr = mask if cnt > k: result += 1 return result
Solution
python
scipy__scipy
benchmarks/benchmarks/peak_finding.py
{ "start": 1072, "end": 1523 }
class ____(Benchmark): """Benchmark `scipy.signal.peak_widths`.""" param_names = ['rel_height'] params = [[0, 0.25, 0.5, 0.75, 1]] def setup(self, rel_height): self.x = electrocardiogram() self.peaks = find_peaks(self.x)[0] self.prominence_data = peak_prominences(self.x, self.peaks) def time_peak_widths(self, rel_height): peak_widths(self.x, self.peaks, rel_height, self.prominence_data)
PeakWidths
python
ray-project__ray
python/ray/_private/thirdparty/pynvml/pynvml.py
{ "start": 246754, "end": 247397 }
class ____(Structure): _fields_ = [ ("lowPwrThreshold", c_uint), ] def nvmlDeviceSetNvLinkDeviceLowPowerThreshold(device, l1threshold): c_info = c_nvmlNvLinkPowerThres_t() c_info.lowPwrThreshold = l1threshold fn = _nvmlGetFunctionPointer("nvmlDeviceSetNvLinkDeviceLowPowerThreshold") ret = fn(device, byref(c_info)) _nvmlCheckReturn(ret) return NVML_SUCCESS NVML_GPU_FABRIC_UUID_LEN = 16 _nvmlGpuFabricState_t = c_uint NVML_GPU_FABRIC_STATE_NOT_SUPPORTED = 0 NVML_GPU_FABRIC_STATE_NOT_STARTED = 1 NVML_GPU_FABRIC_STATE_IN_PROGRESS = 2 NVML_GPU_FABRIC_STATE_COMPLETED = 3
c_nvmlNvLinkPowerThres_t
python
tensorflow__tensorflow
tensorflow/python/ops/image_ops_test.py
{ "start": 30262, "end": 32025 }
class ____(test.Benchmark): def _benchmarkResize(self, image_size, num_channels): batch_size = 1 num_ops = 1000 img = variables.Variable( random_ops.random_normal( [batch_size, image_size[0], image_size[1], num_channels]), name="img") deps = [] for _ in range(num_ops): with ops.control_dependencies(deps): resize_op = image_ops.resize_bicubic( img, [299, 299], align_corners=False) deps = [resize_op] benchmark_op = control_flow_ops.group(*deps) with self.benchmark_session() as sess: self.evaluate(variables.global_variables_initializer()) results = self.run_op_benchmark( sess, benchmark_op, min_iters=20, name=("resize_bicubic_%s_%s_%s" % (image_size[0], image_size[1], num_channels))) print("%s : %.2f ms/img" % (results["name"], 1000 * results["wall_time"] / (batch_size * num_ops))) def benchmarkSimilar3Channel(self): self._benchmarkResize((183, 229), 3) def benchmarkScaleUp3Channel(self): self._benchmarkResize((141, 186), 3) def benchmarkScaleDown3Channel(self): self._benchmarkResize((749, 603), 3) def benchmarkSimilar1Channel(self): self._benchmarkResize((183, 229), 1) def benchmarkScaleUp1Channel(self): self._benchmarkResize((141, 186), 1) def benchmarkScaleDown1Channel(self): self._benchmarkResize((749, 603), 1) def benchmarkSimilar4Channel(self): self._benchmarkResize((183, 229), 4) def benchmarkScaleUp4Channel(self): self._benchmarkResize((141, 186), 4) def benchmarkScaleDown4Channel(self): self._benchmarkResize((749, 603), 4)
ResizeBicubicBenchmark
python
django-guardian__django-guardian
guardian/mixins.py
{ "start": 838, "end": 2506 }
class ____: """A login required mixin for use with class-based views. This Class is a light wrapper around the Django `login_required` decorator, function parameters are instead attributes defined on the class. Due to Python Method Resolution Order (MRO), this mixin must be added as the left most mixin of a view. Attributes: redirect_field_name (str): *Default*: `'next'` login_url (str): *Default*: `settings.LOGIN_URL` Example: ```python from guardian.mixins import LoginRequiredMixin from django.views.generic import View class SecretView(LoginRequiredMixin, View): redirect_field_name = 'foobar' login_url = '/let-me-in/' def get(self, request): return HttpResponse('secret-view') ``` Note: The mixin has exactly the same flow as `login_required` decorator: - If the user isn't logged in, redirect to `settings.LOGIN_URL`, passing the current absolute path in the query string. - Example: `/accounts/login/?next=/polls/3/`. - If the user is logged in, execute the view normally. The view code is free to assume the user is logged in. See Also: - [Python MRO historical reference](https://docs.python.org/3/howto/mro.html) """ redirect_field_name = REDIRECT_FIELD_NAME login_url = settings.LOGIN_URL def dispatch(self, request, *args, **kwargs): return login_required(redirect_field_name=self.redirect_field_name, login_url=self.login_url)(super().dispatch)( request, *args, **kwargs )
LoginRequiredMixin
python
ray-project__ray
python/ray/tests/test_multi_node_3.py
{ "start": 8044, "end": 13068 }
class ____: def __init__(self, val: Optional[int]): self.val: Optional[int] = val def custom_serializer(o: CustomObject) -> int: return o.val * multiplier def custom_deserializer(val: int) -> CustomObject: return CustomObject(val) ray.util.register_serializer( CustomObject, serializer=custom_serializer, deserializer=custom_deserializer, ) @ray.remote def f(o: CustomObject) -> int: return o.val print("Value is:", ray.get(f.remote(CustomObject(1)))) """ assert "Value is: 2" in run_string_as_driver( driver_script.format(address=address_info["address"], multiplier="2") ) assert "Value is: 4" in run_string_as_driver( driver_script.format(address=address_info["address"], multiplier="4") ) assert "Value is: 8" in run_string_as_driver( driver_script.format(address=address_info["address"], multiplier="8") ) def test_multi_driver_logging(ray_start_regular): address = ray_start_regular["address"] # ray.init(address=address) driver1_wait = Semaphore.options(name="driver1_wait").remote(value=0) driver2_wait = Semaphore.options(name="driver2_wait").remote(value=0) main_wait = Semaphore.options(name="main_wait").remote(value=0) # The creation of an actor is asynchronous. # We need to wait for the completion of the actor creation, # otherwise we can't get the actor by name. 
ray.get(driver1_wait.locked.remote()) ray.get(driver2_wait.locked.remote()) ray.get(main_wait.locked.remote()) # Params are address, semaphore name, output1, output2 driver_script_template = """ import ray import sys from ray._common.test_utils import Semaphore @ray.remote(num_cpus=0) def remote_print(s, file=None): print(s, file=file) ray.init(address="{}", namespace="default_test_namespace") driver_wait = ray.get_actor("{}") main_wait = ray.get_actor("main_wait") ray.get(main_wait.release.remote()) ray.get(driver_wait.acquire.remote()) s1 = "{}" ray.get(remote_print.remote(s1)) ray.get(main_wait.release.remote()) ray.get(driver_wait.acquire.remote()) s2 = "{}" ray.get(remote_print.remote(s2)) ray.get(main_wait.release.remote()) """ p1 = run_string_as_driver_nonblocking( driver_script_template.format(address, "driver1_wait", "message1", "message2") ) p2 = run_string_as_driver_nonblocking( driver_script_template.format(address, "driver2_wait", "message3", "message4") ) ray.get(main_wait.acquire.remote()) ray.get(main_wait.acquire.remote()) # At this point both of the other drivers are fully initialized. 
ray.get(driver1_wait.release.remote()) ray.get(driver2_wait.release.remote()) # At this point driver1 should receive 'message1' and driver2 'message3' ray.get(main_wait.acquire.remote()) ray.get(main_wait.acquire.remote()) ray.get(driver1_wait.release.remote()) ray.get(driver2_wait.release.remote()) # At this point driver1 should receive 'message2' and driver2 'message4' ray.get(main_wait.acquire.remote()) ray.get(main_wait.acquire.remote()) driver1_out = p1.stdout.read().decode("ascii") driver2_out = p2.stdout.read().decode("ascii") assert "message1" in driver1_out assert "message2" in driver1_out assert "message3" in driver2_out assert "message4" in driver2_out @pytest.fixture def redis_proc(): """Download external redis and start the subprocess.""" REDIS_SERVER_PATH = "core/src/ray/thirdparty/redis/src/redis-server" full_path = Path(ray.__file__).parents[0] / REDIS_SERVER_PATH check_call_subprocess(["cp", f"{full_path}", "redis-server"]) proc = subprocess.Popen(["./redis-server", "--port", "7999"]) yield proc subprocess.check_call(["ray", "stop"]) os.kill(proc.pid, 9) subprocess.check_call(["rm", "-rf", "redis-server"]) @pytest.mark.skipif( sys.platform == "win32", reason=( "Feature not supported Windows because Redis " "is not officially supported by Windows. 
" "(There cannot be external Redis in Windows)" ), ) def test_ray_stop_should_not_kill_external_redis(redis_proc): check_call_ray(["start", "--head"]) subprocess.check_call(["ray", "stop"]) assert redis_proc.poll() is None def test_ray_stop_kill_workers(): check_call_ray(["start", "--head"]) ray.init(address="auto") @ray.remote class Actor: async def ping(self): return os.getpid() async def run_forever(self): while True: await asyncio.sleep(5) actor = Actor.options(lifetime="detached", name="A").remote() actor.run_forever.remote() actor_pid = ray.get(actor.ping.remote()) ray.shutdown() check_call_ray(["stop", "--force"]) assert not psutil.pid_exists(actor_pid) if __name__ == "__main__": # Make subprocess happy in bazel. os.environ["LC_ALL"] = "en_US.UTF-8" os.environ["LANG"] = "en_US.UTF-8" sys.exit(pytest.main(["-sv", __file__]))
CustomObject
python
django__django
tests/invalid_models_tests/test_relative_fields.py
{ "start": 84133, "end": 92282 }
class ____(TestCase): def test_db_cascade_support(self): class Parent(models.Model): pass class Child(models.Model): parent = models.ForeignKey(Parent, models.DB_CASCADE) field = Child._meta.get_field("parent") expected = ( [] if connection.features.supports_on_delete_db_cascade else [ Error( f"{connection.display_name} does not support a DB_CASCADE.", hint="Change the on_delete rule to CASCADE.", obj=field, id="fields.E324", ) ] ) self.assertEqual(field.check(databases=self.databases), expected) def test_db_cascade_required_db_features(self): class Parent(models.Model): pass class Child(models.Model): parent = models.ForeignKey(Parent, models.DB_CASCADE) class Meta: required_db_features = {"supports_on_delete_db_cascade"} field = Child._meta.get_field("parent") self.assertEqual(field.check(databases=self.databases), []) def test_db_set_default_support(self): class Parent(models.Model): pass class Child(models.Model): parent = models.ForeignKey( Parent, models.DB_SET_DEFAULT, db_default=models.Value(1) ) field = Child._meta.get_field("parent") expected = ( [] if connection.features.supports_on_delete_db_default else [ Error( f"{connection.display_name} does not support a DB_SET_DEFAULT.", hint="Change the on_delete rule to SET_DEFAULT.", obj=field, id="fields.E324", ) ] ) self.assertEqual(field.check(databases=self.databases), expected) def test_db_set_default_required_db_features(self): class Parent(models.Model): pass class Child(models.Model): parent = models.ForeignKey( Parent, models.DB_SET_DEFAULT, db_default=models.Value(1) ) class Meta: required_db_features = {"supports_on_delete_db_default"} field = Child._meta.get_field("parent") self.assertEqual(field.check(databases=self.databases), []) @skipUnlessDBFeature("supports_on_delete_db_default") def test_db_set_default_no_db_default(self): class Parent(models.Model): pass class Child(models.Model): parent = models.ForeignKey(Parent, models.DB_SET_DEFAULT) field = Child._meta.get_field("parent") 
self.assertEqual( field.check(databases=self.databases), [ Error( "Field specifies on_delete=DB_SET_DEFAULT, but has no db_default " "value.", hint="Set a db_default value, or change the on_delete rule.", obj=field, id="fields.E322", ) ], ) def test_db_set_null_support(self): class Parent(models.Model): pass class Child(models.Model): parent = models.ForeignKey(Parent, models.DB_SET_NULL, null=True) field = Child._meta.get_field("parent") expected = ( [] if connection.features.supports_on_delete_db_null else [ Error( f"{connection.display_name} does not support a DB_SET_NULL.", hint="Change the on_delete rule to SET_NULL.", obj=field, id="fields.E324", ) ] ) self.assertEqual(field.check(databases=self.databases), expected) def test_db_set_null_required_db_features(self): class Parent(models.Model): pass class Child(models.Model): parent = models.ForeignKey(Parent, models.DB_SET_NULL, null=True) class Meta: required_db_features = {"supports_on_delete_db_null"} field = Child._meta.get_field("parent") self.assertEqual(field.check(databases=self.databases), []) def test_python_db_chain(self): class GrandParent(models.Model): pass class Parent(models.Model): grand_parent = models.ForeignKey(GrandParent, models.DB_CASCADE) class Child(models.Model): parent = models.ForeignKey(Parent, models.RESTRICT) field = Child._meta.get_field("parent") self.assertEqual( field.check(databases=self.databases), [ Error( "Field specifies Python-level on_delete variant, but referenced " "model uses database-level variant.", hint=( "Use either database or Python on_delete variants uniformly in " "the references chain." 
), obj=field, id="fields.E323", ) ], ) @skipUnlessDBFeature("supports_on_delete_db_null") def test_db_python_chain(self): class GrandParent(models.Model): pass class Parent(models.Model): grand_parent = models.ForeignKey(GrandParent, models.CASCADE) class Child(models.Model): parent = models.ForeignKey(Parent, models.DB_SET_NULL, null=True) field = Child._meta.get_field("parent") self.assertEqual( field.check(databases=self.databases), [ Error( "Field specifies database-level on_delete variant, but referenced " "model uses Python-level variant.", hint=( "Use either database or Python on_delete variants uniformly in " "the references chain." ), obj=field, id="fields.E323", ) ], ) @skipUnlessDBFeature("supports_on_delete_db_cascade") def test_db_python_chain_auto_created(self): class GrandParent(models.Model): pass class Parent(GrandParent): pass class Child(models.Model): parent = models.ForeignKey(Parent, on_delete=models.DB_CASCADE) field = Child._meta.get_field("parent") self.assertEqual( field.check(databases=self.databases), [ Error( "Field specifies database-level on_delete variant, but referenced " "model uses Python-level variant.", hint=( "Use either database or Python on_delete variants uniformly in " "the references chain." 
), obj=field, id="fields.E323", ) ], ) @skipUnlessDBFeature("supports_on_delete_db_null") def test_db_do_nothing_chain(self): class GrandParent(models.Model): pass class Parent(models.Model): grand_parent = models.ForeignKey(GrandParent, models.DO_NOTHING) class Child(models.Model): parent = models.ForeignKey(Parent, models.DB_SET_NULL, null=True) field = Child._meta.get_field("parent") self.assertEqual(field.check(databases=self.databases), []) def test_do_nothing_db_chain(self): class GrandParent(models.Model): pass class Parent(models.Model): grand_parent = models.ForeignKey(GrandParent, models.DB_SET_NULL, null=True) class Child(models.Model): parent = models.ForeignKey(Parent, models.DO_NOTHING) field = Child._meta.get_field("parent") self.assertEqual(field.check(databases=self.databases), [])
DatabaseLevelOnDeleteTests
python
fastapi__sqlmodel
docs_src/tutorial/relationship_attributes/read_relationships/tutorial001.py
{ "start": 338, "end": 3852 }
class ____(SQLModel, table=True): id: Optional[int] = Field(default=None, primary_key=True) name: str = Field(index=True) secret_name: str age: Optional[int] = Field(default=None, index=True) team_id: Optional[int] = Field(default=None, foreign_key="team.id") team: Optional[Team] = Relationship(back_populates="heroes") sqlite_file_name = "database.db" sqlite_url = f"sqlite:///{sqlite_file_name}" engine = create_engine(sqlite_url, echo=True) def create_db_and_tables(): SQLModel.metadata.create_all(engine) def create_heroes(): with Session(engine) as session: team_preventers = Team(name="Preventers", headquarters="Sharp Tower") team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar") hero_deadpond = Hero( name="Deadpond", secret_name="Dive Wilson", team=team_z_force ) hero_rusty_man = Hero( name="Rusty-Man", secret_name="Tommy Sharp", age=48, team=team_preventers ) hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador") session.add(hero_deadpond) session.add(hero_rusty_man) session.add(hero_spider_boy) session.commit() session.refresh(hero_deadpond) session.refresh(hero_rusty_man) session.refresh(hero_spider_boy) print("Created hero:", hero_deadpond) print("Created hero:", hero_rusty_man) print("Created hero:", hero_spider_boy) hero_spider_boy.team = team_preventers session.add(hero_spider_boy) session.commit() session.refresh(hero_spider_boy) print("Updated hero:", hero_spider_boy) hero_black_lion = Hero(name="Black Lion", secret_name="Trevor Challa", age=35) hero_sure_e = Hero(name="Princess Sure-E", secret_name="Sure-E") team_wakaland = Team( name="Wakaland", headquarters="Wakaland Capital City", heroes=[hero_black_lion, hero_sure_e], ) session.add(team_wakaland) session.commit() session.refresh(team_wakaland) print("Team Wakaland:", team_wakaland) hero_tarantula = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32) hero_dr_weird = Hero(name="Dr. 
Weird", secret_name="Steve Weird", age=36) hero_cap = Hero( name="Captain North America", secret_name="Esteban Rogelios", age=93 ) team_preventers.heroes.append(hero_tarantula) team_preventers.heroes.append(hero_dr_weird) team_preventers.heroes.append(hero_cap) session.add(team_preventers) session.commit() session.refresh(hero_tarantula) session.refresh(hero_dr_weird) session.refresh(hero_cap) print("Preventers new hero:", hero_tarantula) print("Preventers new hero:", hero_dr_weird) print("Preventers new hero:", hero_cap) def select_heroes(): with Session(engine) as session: statement = select(Hero).where(Hero.name == "Spider-Boy") result = session.exec(statement) hero_spider_boy = result.one() statement = select(Team).where(Team.id == hero_spider_boy.team_id) result = session.exec(statement) team = result.first() print("Spider-Boy's team:", team) print("Spider-Boy's team again:", hero_spider_boy.team) def main(): create_db_and_tables() create_heroes() select_heroes() if __name__ == "__main__": main()
Hero
python
h5py__h5py
h5py/tests/test_dataset.py
{ "start": 68316, "end": 68672 }
class ____(BaseDataset): """ Retrieval of a single field from a scalar compound dataset should strip the field info """ def test_scalar_compound(self): dt = np.dtype([('a', 'i')]) dset = self.f.create_dataset(make_name(), (), dtype=dt) self.assertEqual(dset['a'].dtype, np.dtype('i'))
TestScalarCompound
python
dagster-io__dagster
python_modules/libraries/dagster-fivetran/dagster_fivetran_tests/test_asset_specs.py
{ "start": 10908, "end": 13235 }
class ____(DagsterFivetranTranslator): def get_asset_spec(self, props: FivetranConnectorTableProps) -> AssetSpec: default_spec = super().get_asset_spec(props) return default_spec.replace_attributes( key=["wacky", *["".join(reversed(item)) for item in default_spec.key.path], "wow"], ) @pytest.mark.parametrize( "translator, expected_key", [ ( MyCustomTranslator, AssetKey(["prefix", "schema_name_in_destination_1", "table_name_in_destination_1"]), ), ( MyCustomTranslatorWackyKeys, AssetKey( ["wacky", "1_noitanitsed_ni_eman_amehcs", "1_noitanitsed_ni_eman_elbat", "wow"] ), ), ], ids=["custom_translator", "custom_translator_wacky_keys"], ) def test_translator_custom_metadata_materialize( fetch_workspace_data_api_mocks: responses.RequestsMock, sync_and_poll: MagicMock, translator: type[DagsterFivetranTranslator], expected_key: AssetKey, ) -> None: with environ({"FIVETRAN_API_KEY": TEST_API_KEY, "FIVETRAN_API_SECRET": TEST_API_SECRET}): resource = FivetranWorkspace( account_id=TEST_ACCOUNT_ID, api_key=EnvVar("FIVETRAN_API_KEY"), api_secret=EnvVar("FIVETRAN_API_SECRET"), ) @fivetran_assets( connector_id=TEST_CONNECTOR_ID, workspace=resource, dagster_fivetran_translator=translator(), ) def my_fivetran_assets_def(context: AssetExecutionContext, fivetran: FivetranWorkspace): yield from fivetran.sync_and_poll(context=context) result = materialize([my_fivetran_assets_def], resources={"fivetran": resource}) assert result.success asset_materializations = [ event for event in result.all_events if event.event_type_value == "ASSET_MATERIALIZATION" ] assert len(asset_materializations) == 4 materialized_asset_keys = { asset_materialization.asset_key for asset_materialization in asset_materializations } assert len(materialized_asset_keys) == 4 assert my_fivetran_assets_def.keys == materialized_asset_keys assert expected_key in materialized_asset_keys
MyCustomTranslatorWackyKeys
python
lxml__lxml
src/lxml/tests/test_relaxng.py
{ "start": 258, "end": 6839 }
class ____(HelperTestCase): def test_relaxng(self): tree_valid = self.parse('<a><b></b></a>') tree_invalid = self.parse('<a><c></c></a>') schema = self.parse('''\ <element name="a" xmlns="http://relaxng.org/ns/structure/1.0"> <zeroOrMore> <element name="b"> <text /> </element> </zeroOrMore> </element> ''') schema = etree.RelaxNG(schema) self.assertTrue(schema.validate(tree_valid)) self.assertFalse(schema.error_log.filter_from_errors()) self.assertFalse(schema.validate(tree_invalid)) self.assertTrue(schema.error_log.filter_from_errors()) self.assertTrue(schema.validate(tree_valid)) # repeat valid self.assertFalse(schema.error_log.filter_from_errors()) # repeat valid def test_relaxng_stringio(self): tree_valid = self.parse('<a><b></b></a>') tree_invalid = self.parse('<a><c></c></a>') schema_file = BytesIO(b'''\ <element name="a" xmlns="http://relaxng.org/ns/structure/1.0"> <zeroOrMore> <element name="b"> <text /> </element> </zeroOrMore> </element> ''') schema = etree.RelaxNG(file=schema_file) self.assertTrue(schema.validate(tree_valid)) self.assertFalse(schema.validate(tree_invalid)) def test_relaxng_elementtree_error(self): self.assertRaises(ValueError, etree.RelaxNG, etree.ElementTree()) def test_relaxng_error(self): tree_invalid = self.parse('<a><c></c></a>') schema = self.parse('''\ <element name="a" xmlns="http://relaxng.org/ns/structure/1.0"> <zeroOrMore> <element name="b"> <text /> </element> </zeroOrMore> </element> ''') schema = etree.RelaxNG(schema) self.assertFalse(schema.validate(tree_invalid)) errors = schema.error_log self.assertTrue([log for log in errors if log.level_name == "ERROR"]) self.assertTrue([log for log in errors if "not expect" in log.message]) def test_relaxng_generic_error(self): tree_invalid = self.parse('''\ <test> <reference id="my-ref">This is my unique ref.</reference> <data ref="my-ref">Valid data</data> <data ref="myref">Invalid data</data> </test> ''') schema = self.parse('''\ <grammar 
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes" xmlns="http://relaxng.org/ns/structure/1.0"> <define name="by-ref"> <data type="IDREF"/> </define> <start> <element name="test"> <zeroOrMore> <element name="reference"> <attribute name="id"> <data type="ID"/> </attribute> <text/> </element> </zeroOrMore> <zeroOrMore> <element name="data"> <attribute name="ref"> <data type="IDREF"/> </attribute> <text/> </element> </zeroOrMore> </element> </start> </grammar> ''') schema = etree.RelaxNG(schema) self.assertFalse(schema.validate(tree_invalid)) errors = schema.error_log self.assertTrue(errors) self.assertTrue([log for log in errors if "IDREF" in log.message]) self.assertTrue([log for log in errors if "myref" in log.message]) def test_relaxng_invalid_schema(self): schema = self.parse('''\ <element name="a" xmlns="http://relaxng.org/ns/structure/1.0"> <zeroOrMore> <element name="b" /> </zeroOrMore> </element> ''') self.assertRaises(etree.RelaxNGParseError, etree.RelaxNG, schema) def test_relaxng_invalid_schema2(self): schema = self.parse('''\ <grammar xmlns="http://relaxng.org/ns/structure/1.0" /> ''') self.assertRaises(etree.RelaxNGParseError, etree.RelaxNG, schema) def test_relaxng_invalid_schema3(self): schema = self.parse('''\ <grammar xmlns="http://relaxng.org/ns/structure/1.0"> <define name="test"> <element name="test"/> </define> </grammar> ''') self.assertRaises(etree.RelaxNGParseError, etree.RelaxNG, schema) def test_relaxng_invalid_schema4(self): # segfault schema = self.parse('''\ <element name="a" xmlns="mynamespace" /> ''') self.assertRaises(etree.RelaxNGParseError, etree.RelaxNG, schema) def test_relaxng_include(self): # this will only work if we access the file through path or # file object.. 
f = open(fileInTestDir('test1.rng'), 'rb') try: schema = etree.RelaxNG(file=f) finally: f.close() def test_relaxng_shortcut(self): tree_valid = self.parse('<a><b></b></a>') tree_invalid = self.parse('<a><c></c></a>') schema = self.parse('''\ <element name="a" xmlns="http://relaxng.org/ns/structure/1.0"> <zeroOrMore> <element name="b"> <text /> </element> </zeroOrMore> </element> ''') self.assertTrue(tree_valid.relaxng(schema)) self.assertFalse(tree_invalid.relaxng(schema)) def test_multiple_elementrees(self): tree = self.parse('<a><b>B</b><c>C</c></a>') schema = etree.RelaxNG( self.parse('''\ <element name="a" xmlns="http://relaxng.org/ns/structure/1.0"> <element name="b"> <text /> </element> <element name="c"> <text /> </element> </element> ''') ) self.assertTrue(schema.validate(tree)) self.assertFalse(schema.error_log.filter_from_errors()) self.assertTrue(schema.validate(tree)) # repeat valid self.assertFalse(schema.error_log.filter_from_errors()) # repeat valid schema = etree.RelaxNG( self.parse('''\ <element name="b" xmlns="http://relaxng.org/ns/structure/1.0"> <text /> </element> ''') ) c_tree = etree.ElementTree(tree.getroot()[1]) self.assertEqual(self._rootstring(c_tree), b'<c>C</c>') self.assertFalse(schema.validate(c_tree)) self.assertTrue(schema.error_log.filter_from_errors()) b_tree = etree.ElementTree(tree.getroot()[0]) self.assertEqual(self._rootstring(b_tree), b'<b>B</b>') self.assertTrue(schema.validate(b_tree)) self.assertFalse(schema.error_log.filter_from_errors())
ETreeRelaxNGTestCase
python
django__django
django/views/generic/edit.py
{ "start": 5619, "end": 5714 }
class ____(FormMixin, ProcessFormView): """A base view for displaying a form."""
BaseFormView
python
pytest-dev__pytest
testing/test_recwarn.py
{ "start": 2471, "end": 4335 }
class ____: def test_recording(self) -> None: rec = WarningsRecorder(_ispytest=True) with rec: assert not rec.list warnings.warn_explicit("hello", UserWarning, "xyz", 13) assert len(rec.list) == 1 warnings.warn(DeprecationWarning("hello")) assert len(rec.list) == 2 warn = rec.pop() assert str(warn.message) == "hello" values = rec.list rec.clear() assert len(rec.list) == 0 assert values is rec.list pytest.raises(AssertionError, rec.pop) def test_warn_stacklevel(self) -> None: """#4243""" rec = WarningsRecorder(_ispytest=True) with rec: warnings.warn("test", DeprecationWarning, 2) def test_typechecking(self) -> None: from _pytest.recwarn import WarningsChecker with pytest.raises(TypeError): WarningsChecker(5, _ispytest=True) # type: ignore[arg-type] with pytest.raises(TypeError): WarningsChecker(("hi", RuntimeWarning), _ispytest=True) # type: ignore[arg-type] with pytest.raises(TypeError): WarningsChecker([DeprecationWarning, RuntimeWarning], _ispytest=True) # type: ignore[arg-type] def test_invalid_enter_exit(self) -> None: # wrap this test in WarningsRecorder to ensure warning state gets reset with WarningsRecorder(_ispytest=True): with pytest.raises(RuntimeError): rec = WarningsRecorder(_ispytest=True) rec.__exit__(None, None, None) # can't exit before entering with pytest.raises(RuntimeError): rec = WarningsRecorder(_ispytest=True) with rec: with rec: pass # can't enter twice
TestWarningsRecorderChecker
python
kennethreitz__tablib
src/tablib/formats/_dbf.py
{ "start": 169, "end": 1951 }
class ____: title = 'dbf' extensions = ('csv',) DEFAULT_ENCODING = 'utf-8' @classmethod def export_set(cls, dataset): """Returns DBF representation of a Dataset""" new_dbf = dbfnew.dbf_new() temp_file, temp_uri = tempfile.mkstemp() # create the appropriate fields based on the contents of the first row first_row = dataset[0] for fieldname, field_value in zip(dataset.headers, first_row): if type(field_value) in [int, float]: new_dbf.add_field(fieldname, 'N', 10, 8) else: new_dbf.add_field(fieldname, 'C', 80) new_dbf.write(temp_uri) dbf_file = dbf.Dbf(temp_uri, readOnly=0) for row in dataset: record = dbfrecord.DbfRecord(dbf_file) for fieldname, field_value in zip(dataset.headers, row): record[fieldname] = field_value record.store() dbf_file.close() dbf_stream = open(temp_uri, 'rb') stream = io.BytesIO(dbf_stream.read()) dbf_stream.close() os.close(temp_file) os.remove(temp_uri) return stream.getvalue() @classmethod def import_set(cls, dset, in_stream, headers=True): """Returns a dataset from a DBF stream.""" dset.wipe() _dbf = dbf.Dbf(in_stream) dset.headers = _dbf.fieldNames for record in range(_dbf.recordCount): row = [_dbf[record][f] for f in _dbf.fieldNames] dset.append(row) @classmethod def detect(cls, stream): """Returns True if the given stream is valid DBF""" try: _dbf = dbf.Dbf(stream, readOnly=True) return True except Exception: return False
DBFFormat
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/enum1.py
{ "start": 7849, "end": 7970 }
class ____(Enum, metaclass=CustomEnumMeta1): @property def value(self) -> str: return "test"
TestEnum21Base
python
HypothesisWorks__hypothesis
hypothesis-python/tests/django/toystore/models.py
{ "start": 2705, "end": 2770 }
class ____(models.Model): file1 = models.FileField()
FileFields
python
getsentry__sentry
src/sentry/api/serializers/rest_framework/project.py
{ "start": 317, "end": 1582 }
class ____(serializers.Field): def __init__( self, scope: str | Collection[str] = "project:write", id_allowed: bool = False, **kwags ): """ The scope parameter specifies which permissions are required to access the project field. If multiple scopes are provided, the project can be accessed when the user is authenticated with any of the scopes. """ self.scope = scope self.id_allowed = id_allowed super().__init__(**kwags) def to_representation(self, value): return value def to_internal_value(self, data): try: if self.id_allowed: project = Project.objects.get( organization=self.context["organization"], slug__id_or_slug=data ) else: project = Project.objects.get(organization=self.context["organization"], slug=data) except Project.DoesNotExist: raise ValidationError("Invalid project") scopes = (self.scope,) if isinstance(self.scope, str) else self.scope if not self.context["access"].has_any_project_scope(project, scopes): raise ValidationError("Insufficient access to project") return project
ProjectField
python
encode__starlette
tests/test_routing.py
{ "start": 30580, "end": 42291 }
class ____: def __init__(self, app: ASGIApp) -> None: self.app = app async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: scope["add_headers_middleware"] = True async def modified_send(msg: Message) -> None: if msg["type"] == "http.response.start": msg["headers"].append((b"X-Test", b"Set by middleware")) await send(msg) await self.app(scope, receive, modified_send) def assert_middleware_header_route(request: Request) -> Response: assert request.scope["add_headers_middleware"] is True return Response() route_with_middleware = Starlette( routes=[ Route( "/http", endpoint=assert_middleware_header_route, methods=["GET"], middleware=[Middleware(AddHeadersMiddleware)], ), Route("/home", homepage), ] ) mounted_routes_with_middleware = Starlette( routes=[ Mount( "/http", routes=[ Route( "/", endpoint=assert_middleware_header_route, methods=["GET"], name="route", ), ], middleware=[Middleware(AddHeadersMiddleware)], ), Route("/home", homepage), ] ) mounted_app_with_middleware = Starlette( routes=[ Mount( "/http", app=Route( "/", endpoint=assert_middleware_header_route, methods=["GET"], name="route", ), middleware=[Middleware(AddHeadersMiddleware)], ), Route("/home", homepage), ] ) @pytest.mark.parametrize( "app", [ mounted_routes_with_middleware, mounted_app_with_middleware, route_with_middleware, ], ) def test_base_route_middleware( test_client_factory: TestClientFactory, app: Starlette, ) -> None: test_client = test_client_factory(app) response = test_client.get("/home") assert response.status_code == 200 assert "X-Test" not in response.headers response = test_client.get("/http") assert response.status_code == 200 assert response.headers["X-Test"] == "Set by middleware" def test_mount_routes_with_middleware_url_path_for() -> None: """Checks that url_path_for still works with mounted routes with Middleware""" assert mounted_routes_with_middleware.url_path_for("route") == "/http/" def test_mount_asgi_app_with_middleware_url_path_for() -> None: 
"""Mounted ASGI apps do not work with url path for, middleware does not change this """ with pytest.raises(NoMatchFound): mounted_app_with_middleware.url_path_for("route") def test_add_route_to_app_after_mount( test_client_factory: Callable[..., TestClient], ) -> None: """Checks that Mount will pick up routes added to the underlying app after it is mounted """ inner_app = Router() app = Mount("/http", app=inner_app) inner_app.add_route( "/inner", endpoint=homepage, methods=["GET"], ) client = test_client_factory(app) response = client.get("/http/inner") assert response.status_code == 200 def test_exception_on_mounted_apps( test_client_factory: TestClientFactory, ) -> None: def exc(request: Request) -> None: raise Exception("Exc") sub_app = Starlette(routes=[Route("/", exc)]) app = Starlette(routes=[Mount("/sub", app=sub_app)]) client = test_client_factory(app) with pytest.raises(Exception) as ctx: client.get("/sub/") assert str(ctx.value) == "Exc" def test_mounted_middleware_does_not_catch_exception( test_client_factory: Callable[..., TestClient], ) -> None: # https://github.com/Kludex/starlette/pull/1649#discussion_r960236107 def exc(request: Request) -> Response: raise HTTPException(status_code=403, detail="auth") class NamedMiddleware: def __init__(self, app: ASGIApp, name: str) -> None: self.app = app self.name = name async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: async def modified_send(msg: Message) -> None: if msg["type"] == "http.response.start": msg["headers"].append((f"X-{self.name}".encode(), b"true")) await send(msg) await self.app(scope, receive, modified_send) app = Starlette( routes=[ Mount( "/mount", routes=[ Route("/err", exc), Route("/home", homepage), ], middleware=[Middleware(NamedMiddleware, name="Mounted")], ), Route("/err", exc), Route("/home", homepage), ], middleware=[Middleware(NamedMiddleware, name="Outer")], ) client = test_client_factory(app) resp = client.get("/home") assert resp.status_code == 200, 
resp.content assert "X-Outer" in resp.headers resp = client.get("/err") assert resp.status_code == 403, resp.content assert "X-Outer" in resp.headers resp = client.get("/mount/home") assert resp.status_code == 200, resp.content assert "X-Mounted" in resp.headers resp = client.get("/mount/err") assert resp.status_code == 403, resp.content assert "X-Mounted" in resp.headers def test_websocket_route_middleware( test_client_factory: TestClientFactory, ) -> None: async def websocket_endpoint(session: WebSocket) -> None: await session.accept() await session.send_text("Hello, world!") await session.close() class WebsocketMiddleware: def __init__(self, app: ASGIApp) -> None: self.app = app async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: async def modified_send(msg: Message) -> None: if msg["type"] == "websocket.accept": msg["headers"].append((b"X-Test", b"Set by middleware")) await send(msg) await self.app(scope, receive, modified_send) app = Starlette( routes=[ WebSocketRoute( "/ws", endpoint=websocket_endpoint, middleware=[Middleware(WebsocketMiddleware)], ) ] ) client = test_client_factory(app) with client.websocket_connect("/ws") as websocket: text = websocket.receive_text() assert text == "Hello, world!" 
assert websocket.extra_headers == [(b"X-Test", b"Set by middleware")] def test_route_repr() -> None: route = Route("/welcome", endpoint=homepage) assert repr(route) == "Route(path='/welcome', name='homepage', methods=['GET', 'HEAD'])" def test_route_repr_without_methods() -> None: route = Route("/welcome", endpoint=Endpoint, methods=None) assert repr(route) == "Route(path='/welcome', name='Endpoint', methods=[])" def test_websocket_route_repr() -> None: route = WebSocketRoute("/ws", endpoint=websocket_endpoint) assert repr(route) == "WebSocketRoute(path='/ws', name='websocket_endpoint')" def test_mount_repr() -> None: route = Mount( "/app", routes=[ Route("/", endpoint=homepage), ], ) # test for substring because repr(Router) returns unique object ID assert repr(route).startswith("Mount(path='/app', name='', app=") def test_mount_named_repr() -> None: route = Mount( "/app", name="app", routes=[ Route("/", endpoint=homepage), ], ) # test for substring because repr(Router) returns unique object ID assert repr(route).startswith("Mount(path='/app', name='app', app=") def test_host_repr() -> None: route = Host( "example.com", app=Router( [ Route("/", endpoint=homepage), ] ), ) # test for substring because repr(Router) returns unique object ID assert repr(route).startswith("Host(host='example.com', name='', app=") def test_host_named_repr() -> None: route = Host( "example.com", name="app", app=Router( [ Route("/", endpoint=homepage), ] ), ) # test for substring because repr(Router) returns unique object ID assert repr(route).startswith("Host(host='example.com', name='app', app=") def test_decorator_deprecations() -> None: router = Router() with pytest.deprecated_call(): router.route("/")(homepage) with pytest.deprecated_call(): router.websocket_route("/ws")(websocket_endpoint) with pytest.deprecated_call(): async def startup() -> None: ... 
# pragma: no cover router.on_event("startup")(startup) async def echo_paths(request: Request, name: str) -> JSONResponse: return JSONResponse( { "name": name, "path": request.scope["path"], "root_path": request.scope["root_path"], } ) async def pure_asgi_echo_paths(scope: Scope, receive: Receive, send: Send, name: str) -> None: data = {"name": name, "path": scope["path"], "root_path": scope["root_path"]} content = json.dumps(data).encode("utf-8") await send( { "type": "http.response.start", "status": 200, "headers": [(b"content-type", b"application/json")], } ) await send({"type": "http.response.body", "body": content}) echo_paths_routes = [ Route( "/path", functools.partial(echo_paths, name="path"), name="path", methods=["GET"], ), Route( "/root-queue/path", functools.partial(echo_paths, name="queue_path"), name="queue_path", methods=["POST"], ), Mount("/asgipath", app=functools.partial(pure_asgi_echo_paths, name="asgipath")), Mount( "/sub", name="mount", routes=[ Route( "/path", functools.partial(echo_paths, name="subpath"), name="subpath", methods=["GET"], ), ], ), ] def test_paths_with_root_path(test_client_factory: TestClientFactory) -> None: app = Starlette(routes=echo_paths_routes) client = test_client_factory(app, base_url="https://www.example.org/", root_path="/root") response = client.get("/root/path") assert response.status_code == 200 assert response.json() == { "name": "path", "path": "/root/path", "root_path": "/root", } response = client.get("/root/asgipath/") assert response.status_code == 200 assert response.json() == { "name": "asgipath", "path": "/root/asgipath/", # Things that mount other ASGI apps, like WSGIMiddleware, would not be aware # of the prefixed path, and would have their own notion of their own paths, # so they need to be able to rely on the root_path to know the location they # are mounted on "root_path": "/root/asgipath", } response = client.get("/root/sub/path") assert response.status_code == 200 assert response.json() == { 
"name": "subpath", "path": "/root/sub/path", "root_path": "/root/sub", } response = client.post("/root/root-queue/path") assert response.status_code == 200 assert response.json() == { "name": "queue_path", "path": "/root/root-queue/path", "root_path": "/root", }
AddHeadersMiddleware
python
huggingface__transformers
tests/models/vitpose/test_modeling_vitpose.py
{ "start": 8326, "end": 12340 }
class ____(unittest.TestCase): @cached_property def default_image_processor(self): return ( VitPoseImageProcessor.from_pretrained("usyd-community/vitpose-base-simple") if is_vision_available() else None ) @slow def test_inference_pose_estimation(self): image_processor = self.default_image_processor model = VitPoseForPoseEstimation.from_pretrained("usyd-community/vitpose-base-simple", device_map=torch_device) image = prepare_img() boxes = [[[412.8, 157.61, 53.05, 138.01], [384.43, 172.21, 15.12, 35.74]]] inputs = image_processor(images=image, boxes=boxes, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) heatmaps = outputs.heatmaps assert heatmaps.shape == (2, 17, 64, 48) expected_slice = torch.tensor( [ [9.9330e-06, 9.9330e-06, 9.9330e-06], [9.9330e-06, 9.9330e-06, 9.9330e-06], [9.9330e-06, 9.9330e-06, 9.9330e-06], ] ).to(torch_device) assert torch.allclose(heatmaps[0, 0, :3, :3], expected_slice, atol=1e-4) pose_results = image_processor.post_process_pose_estimation(outputs, boxes=boxes)[0] expected_bbox = torch.tensor([391.9900, 190.0800, 391.1575, 189.3034]) expected_keypoints = torch.tensor( [ [3.9813e02, 1.8184e02], [3.9828e02, 1.7981e02], [3.9596e02, 1.7948e02], ] ) expected_scores = torch.tensor([8.7529e-01, 8.4315e-01, 9.2678e-01]) self.assertEqual(len(pose_results), 2) torch.testing.assert_close(pose_results[1]["bbox"].cpu(), expected_bbox, rtol=1e-4, atol=1e-4) torch.testing.assert_close(pose_results[1]["keypoints"][:3].cpu(), expected_keypoints, rtol=1e-2, atol=1e-2) torch.testing.assert_close(pose_results[1]["scores"][:3].cpu(), expected_scores, rtol=1e-4, atol=1e-4) @slow def test_batched_inference(self): image_processor = self.default_image_processor model = VitPoseForPoseEstimation.from_pretrained("usyd-community/vitpose-base-simple", device_map=torch_device) image = prepare_img() boxes = [ [[412.8, 157.61, 53.05, 138.01], [384.43, 172.21, 15.12, 35.74]], [[412.8, 157.61, 53.05, 138.01], [384.43, 172.21, 15.12, 
35.74]], ] inputs = image_processor(images=[image, image], boxes=boxes, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) heatmaps = outputs.heatmaps assert heatmaps.shape == (4, 17, 64, 48) expected_slice = torch.tensor( [ [9.9330e-06, 9.9330e-06, 9.9330e-06], [9.9330e-06, 9.9330e-06, 9.9330e-06], [9.9330e-06, 9.9330e-06, 9.9330e-06], ] ).to(torch_device) assert torch.allclose(heatmaps[0, 0, :3, :3], expected_slice, atol=1e-4) pose_results = image_processor.post_process_pose_estimation(outputs, boxes=boxes) print(pose_results) expected_bbox = torch.tensor([391.9900, 190.0800, 391.1575, 189.3034]) expected_keypoints = torch.tensor( [ [3.9813e02, 1.8184e02], [3.9828e02, 1.7981e02], [3.9596e02, 1.7948e02], ] ) expected_scores = torch.tensor([8.7529e-01, 8.4315e-01, 9.2678e-01]) self.assertEqual(len(pose_results), 2) self.assertEqual(len(pose_results[0]), 2) torch.testing.assert_close(pose_results[0][1]["bbox"].cpu(), expected_bbox, rtol=1e-4, atol=1e-4) torch.testing.assert_close(pose_results[0][1]["keypoints"][:3].cpu(), expected_keypoints, rtol=1e-2, atol=1e-2) torch.testing.assert_close(pose_results[0][1]["scores"][:3].cpu(), expected_scores, rtol=1e-4, atol=1e-4)
VitPoseModelIntegrationTest
python
getsentry__sentry
src/sentry/utils/snuba.py
{ "start": 17203, "end": 30629 }
class ____(urllib3.Retry): """ urllib3 Retry class does not allow us to retry on read errors but to exclude read timeout. Retrying after a timeout adds useless load to Snuba. """ def increment( self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None, ): """ Just rely on the parent class unless we have a read timeout. In that case immediately give up """ with sentry_sdk.start_span(op="snuba_pool.retry.increment") as span: # This next block is all debugging to try to track down a bug where we're seeing duplicate snuba requests # Wrapping the entire thing in a try/except to be safe cause none of it actually needs to run try: if error: error_class = error.__class__ module = error_class.__module__ name = error_class.__name__ span.set_tag("snuba_pool.retry.error", f"{module}.{name}") else: span.set_tag("snuba_pool.retry.error", "None") span.set_tag("snuba_pool.retry.total", self.total) span.set_tag("snuba_pool.response.status", "unknown") if response: if response.status: span.set_tag("snuba_pool.response.status", response.status) except Exception: pass if error and isinstance(error, urllib3.exceptions.ReadTimeoutError): raise error.with_traceback(_stacktrace) metrics.incr( "snuba.client.retry", tags={"method": method, "path": urlparse(url).path if url else None}, ) return super().increment( method=method, url=url, response=response, error=error, _pool=_pool, _stacktrace=_stacktrace, ) _snuba_pool = connection_from_url( settings.SENTRY_SNUBA, retries=RetrySkipTimeout( total=5, # Our calls to snuba frequently fail due to network issues. We want to # automatically retry most requests. Some of our POSTs and all of our DELETEs # do cause mutations, but we have other things in place to handle duplicate # mutations. 
allowed_methods={"GET", "POST", "DELETE"}, ), timeout=settings.SENTRY_SNUBA_TIMEOUT, maxsize=10, ) epoch_naive = datetime(1970, 1, 1, tzinfo=None) def to_naive_timestamp(value): """ Convert a time zone aware datetime to a POSIX timestamp (with fractional component.) """ return (value - epoch_naive).total_seconds() def to_start_of_hour(dt: datetime) -> str: """This is a function that mimics toStartOfHour from Clickhouse""" return dt.replace(minute=0, second=0, microsecond=0).isoformat() def get_snuba_column_name(name, dataset=Dataset.Events): """ Get corresponding Snuba column name from Sentry snuba map, if not found the column is assumed to be a tag. If name is falsy or name is a quoted literal (e.g. "'name'"), leave unchanged. """ no_conversion = {"group_id", "group_ids", "project_id", "start", "end"} if name in no_conversion: return name if not name or name.startswith("tags[") or QUOTED_LITERAL_RE.match(name): return name if name.startswith("flags["): # Flags queries are valid for the events dataset only. For other datasets we # query the tags expecting to find nothing. We can't return `None` or otherwise # short-circuit a query or filter that wouldn't be valid in its context. if dataset == Dataset.Events: return name else: return f"tags[{name.replace("[", "").replace("]", "").replace('"', "")}]" measurement_name = get_measurement_name(name) span_op_breakdown_name = get_span_op_breakdown_name(name) if "measurements_key" in DATASETS[dataset] and measurement_name: default = f"measurements[{measurement_name}]" elif "span_op_breakdowns_key" in DATASETS[dataset] and span_op_breakdown_name: default = f"span_op_breakdowns[{span_op_breakdown_name}]" else: default = f"tags[{name}]" return DATASETS[dataset].get(name, default) def get_function_index(column_expr, depth=0): """ If column_expr list contains a function, returns the index of its function name within column_expr (and assumption is that index + 1 is the list of arguments), otherwise None. 
A function expression is of the form: [func, [arg1, arg2]] => func(arg1, arg2) If a string argument is followed by list arg, the pair of them is assumed to be a nested function call, with extra args to the outer function afterward. [func1, [func2, [arg1, arg2], arg3]] => func1(func2(arg1, arg2), arg3) Although at the top level, there is no outer function call, and the optional 3rd argument is interpreted as an alias for the entire expression. [func, [arg1], alias] => function(arg1) AS alias You can also have a function part of an argument list: [func1, [arg1, func2, [arg2, arg3]]] => func1(arg1, func2(arg2, arg3)) """ index = None if isinstance(column_expr, (tuple, list)): i = 0 while i < len(column_expr) - 1: # The assumption here is that a list that follows a string means # the string is a function name if isinstance(column_expr[i], str) and isinstance(column_expr[i + 1], (tuple, list)): assert column_expr[i] in SAFE_FUNCTIONS or SAFE_FUNCTION_RE.match( column_expr[i] ), column_expr[i] index = i break else: i = i + 1 return index else: return None def get_arrayjoin(column): match = re.match(r"^(exception_stacks|exception_frames|contexts)\..+$", column) if match: return match.groups()[0] def get_organization_id_from_project_ids(project_ids: Sequence[int]) -> int: # any project will do, as they should all be from the same organization try: # Most of the time the project should exist, so get from cache to keep it fast organization_id = Project.objects.get_from_cache(pk=project_ids[0]).organization_id except Project.DoesNotExist: # But in the case the first project doesn't exist, grab the first non deleted project project = Project.objects.filter(pk__in=project_ids).values("organization_id").first() if project is None: raise UnqualifiedQueryError("All project_ids from the filter no longer exist") organization_id = project.get("organization_id") return organization_id def infer_project_ids_from_related_models( filter_keys: Mapping[str, Sequence[int]], ) -> list[int]: 
ids = [set(get_related_project_ids(k, filter_keys[k])) for k in filter_keys] return list(set.union(*ids)) def get_query_params_to_update_for_projects( query_params: SnubaQueryParams, with_org: bool = False ) -> tuple[int, dict[str, Any]]: """ Get the project ID and query params that need to be updated for project based datasets, before we send the query to Snuba. """ if "project_id" in query_params.filter_keys: # If we are given a set of project ids, use those directly. project_ids = list(set(query_params.filter_keys["project_id"])) elif query_params.filter_keys: # Otherwise infer the project_ids from any related models with timer("get_related_project_ids"): project_ids = infer_project_ids_from_related_models(query_params.filter_keys) elif query_params.conditions: project_ids = [] for cond in query_params.conditions: if cond[0] == "project_id": project_ids = [cond[2]] if cond[1] == "=" else cond[2] else: project_ids = [] if not project_ids: raise UnqualifiedQueryError( "No project_id filter, or none could be inferred from other filters." ) organization_id = get_organization_id_from_project_ids(project_ids) params: dict[str, Any] = {"project": project_ids} if with_org: params["organization"] = organization_id return organization_id, params def get_query_params_to_update_for_organizations(query_params): """ Get the organization ID and query params that need to be updated for organization based datasets, before we send the query to Snuba. """ if "org_id" in query_params.filter_keys: organization_ids = list(set(query_params.filter_keys["org_id"])) if len(organization_ids) != 1: raise UnqualifiedQueryError("Multiple organization_ids found. 
Only one allowed.") organization_id = organization_ids[0] elif "project_id" in query_params.filter_keys: organization_id, _ = get_query_params_to_update_for_projects(query_params) elif "key_id" in query_params.filter_keys: key_ids = list(set(query_params.filter_keys["key_id"])) project_key = ProjectKey.objects.get(pk=key_ids[0]) organization_id = project_key.project.organization_id else: organization_id = None if not organization_id: raise UnqualifiedQueryError( "No organization_id filter, or none could be inferred from other filters." ) return organization_id, {"organization": organization_id} def _prepare_start_end( start: datetime | None, end: datetime | None, organization_id: int, group_ids: Sequence[int] | None, ) -> tuple[datetime, datetime]: if not start: start = datetime(2008, 5, 8) if not end: end = datetime.utcnow() + timedelta(seconds=1) # convert to naive UTC datetimes, as Snuba only deals in UTC # and this avoids offset-naive and offset-aware issues start = naiveify_datetime(start) end = naiveify_datetime(end) expired, start = outside_retention_with_modified_start( start, end, Organization(organization_id) ) if expired: raise QueryOutsideRetentionError("Invalid date range. Please try a more recent date range.") # if `shrink_time_window` pushed `start` after `end` it means the user queried # a Group for T1 to T2 when the group was only active for T3 to T4, so the query # wouldn't return any results anyway new_start = shrink_time_window(group_ids, start) # TODO (alexh) this is a quick emergency fix for an occasion where a search # results in only 1 django candidate, which is then passed to snuba to # check and we raised because of it. 
Remove this once we figure out why the # candidate was returned from django at all if it existed only outside the # time range of the query if new_start <= end: start = new_start if start > end: raise QueryOutsideGroupActivityError return start, end def _prepare_query_params(query_params: SnubaQueryParams, referrer: str | None = None): kwargs = deepcopy(query_params.kwargs) query_params_conditions = deepcopy(query_params.conditions) with timer("get_snuba_map"): forward, reverse = get_snuba_translators( query_params.filter_keys, is_grouprelease=query_params.is_grouprelease ) if query_params.dataset in [ Dataset.Events, Dataset.Discover, Dataset.Sessions, Dataset.Transactions, Dataset.Replays, Dataset.IssuePlatform, ]: (organization_id, params_to_update) = get_query_params_to_update_for_projects( query_params, with_org=query_params.dataset == Dataset.Sessions ) elif query_params.dataset in [Dataset.Outcomes, Dataset.OutcomesRaw]: (organization_id, params_to_update) = get_query_params_to_update_for_organizations( query_params ) else: raise UnqualifiedQueryError( "No strategy found for getting an organization for the given dataset." 
) kwargs.update(params_to_update) for col, keys in forward(deepcopy(query_params.filter_keys)).items(): if keys: if len(keys) == 1 and None in keys: query_params_conditions.append((col, "IS NULL", None)) else: query_params_conditions.append((col, "IN", keys)) start, end = _prepare_start_end( query_params.start, query_params.end, organization_id, query_params.filter_keys.get("group_id"), ) kwargs.update( { "dataset": query_params.dataset.value, "from_date": start.isoformat(), "to_date": end.isoformat(), "groupby": query_params.groupby, "conditions": query_params_conditions, "aggregations": query_params.aggregations, "granularity": query_params.rollup, # TODO: name these things the same } ) kwargs = {k: v for k, v in kwargs.items() if v is not None} if referrer: kwargs["tenant_ids"] = kwargs["tenant_ids"] if "tenant_ids" in kwargs else dict() kwargs["tenant_ids"]["referrer"] = referrer kwargs.update(OVERRIDE_OPTIONS) return kwargs, forward, reverse
RetrySkipTimeout
python
bokeh__bokeh
src/bokeh/models/mappers.py
{ "start": 10157, "end": 10755 }
class ____(ScanningColorMapper): ''' ''' # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) bins = Int(default=256*256, help="Number of histogram bins") rescale_discrete_levels = Bool(default=False, help=""" If there are only a few discrete levels in the values that are color mapped then ``rescale_discrete_levels=True`` decreases the lower limit of the span so that the values are rendered towards the top end of the palette. """) @abstract
EqHistColorMapper
python
ethereum__web3.py
tests/core/providers/test_websocket_provider.py
{ "start": 852, "end": 18978 }
class ____(Exception): pass GET_BLOCK_JSON_MESSAGE = { "id": 0, "jsonrpc": "2.0", "method": "eth_getBlockByNumber", "params": ["latest", False], } def test_get_endpoint_uri_or_ipc_path_returns_endpoint_uri(): provider = WebSocketProvider("ws://mocked") assert ( provider.get_endpoint_uri_or_ipc_path() == "ws://mocked" == provider.endpoint_uri ) # -- async -- # def test_websocket_provider_default_values(): ws_uri = "ws://127.0.0.1:1337" with patch.dict("os.environ", {"WEB3_WS_PROVIDER_URI": ws_uri}): provider = WebSocketProvider() assert provider.endpoint_uri == ws_uri assert provider.use_text_frames is False @pytest.mark.asyncio async def test_disconnect_cleanup(): provider = WebSocketProvider("ws://mocked") with patch( "web3.providers.persistent.websocket.connect", new=lambda *_1, **_2: _mocked_ws_conn(), ): await provider.connect() assert provider._ws is not None assert provider._message_listener_task is not None # put some items in each cache provider._request_processor._request_response_cache.cache("0", "0x1337") provider._request_processor._request_information_cache.cache("0", "0x1337") provider._request_processor._subscription_response_queue.put_nowait({"id": "0"}) provider._request_processor._handler_subscription_queue.put_nowait({"id": "0"}) assert len(provider._request_processor._request_response_cache) == 1 assert len(provider._request_processor._request_information_cache) == 1 assert provider._request_processor._subscription_response_queue.qsize() == 1 assert provider._request_processor._handler_subscription_queue.qsize() == 1 await provider.disconnect() assert provider._ws is None assert len(provider._request_processor._request_response_cache) == 0 assert len(provider._request_processor._request_information_cache) == 0 assert provider._request_processor._subscription_response_queue.empty() assert provider._request_processor._handler_subscription_queue.empty() @pytest.mark.asyncio async def test_async_make_request_returns_desired_response(): provider = 
WebSocketProvider("ws://mocked") with patch( "web3.providers.persistent.websocket.connect", new=lambda *_1, **_2: _mocked_ws_conn(), ): await provider.connect() _mock_ws(provider) method_under_test = provider.make_request undesired_responses_count = 10 ws_messages = [ to_bytes( text=json.dumps( { "jsonrpc": "2.0", "method": "eth_subscription", "params": {"subscription": "0x1", "result": f"0x{i}"}, } ) ) for i in range(undesired_responses_count) ] # The first request we make should have an id of `0`, expect the response to match # that id. Append it as the last response in the list. ws_messages.append(b'{"jsonrpc": "2.0", "id": 0, "result": "0x1337"}') provider._ws = WebSocketMessageStreamMock(messages=ws_messages) response = await method_under_test(RPCEndpoint("some_method"), ["desired_params"]) assert response == json.loads(ws_messages.pop()) qsize = provider._request_processor._subscription_response_queue.qsize() assert qsize == len(ws_messages) == undesired_responses_count for _ in range(qsize): cached_response = ( await provider._request_processor._subscription_response_queue.get() ) # assert all cached responses are in the list of responses we received assert to_bytes(text=json.dumps(cached_response)) in ws_messages assert provider._request_processor._subscription_response_queue.empty() assert len(provider._request_processor._request_response_cache) == 0 await provider.disconnect() @pytest.mark.asyncio async def test_async_make_request_times_out_of_while_loop_looking_for_response(): timeout = 0.001 provider = WebSocketProvider("ws://mocked", request_timeout=timeout) method_under_test = provider.make_request _mock_ws(provider) with pytest.raises( TimeExhausted, match=r"Timed out waiting for response with request id `0` after " rf"{timeout} second\(s\)", ): await method_under_test(RPCEndpoint("some_method"), ["desired_params"]) @pytest.mark.asyncio async def test_msg_listener_task_starts_on_provider_connect_and_clears_on_disconnect(): provider = 
WebSocketProvider("ws://mocked") _mock_ws(provider) assert provider._message_listener_task is None with patch( "web3.providers.persistent.websocket.connect", new=lambda *_1, **_2: _mocked_ws_conn(), ): await provider.connect() # connect assert provider._message_listener_task is not None assert not provider._message_listener_task.cancelled() await provider.disconnect() # disconnect assert not provider._message_listener_task @pytest.mark.asyncio async def test_msg_listener_task_raises_exceptions_by_default(): provider = WebSocketProvider("ws://mocked") _mock_ws(provider) with patch( "web3.providers.persistent.websocket.connect", new=lambda *_1, **_2: _mocked_ws_conn(), ): await provider.connect() assert provider._message_listener_task is not None assert provider.silence_listener_task_exceptions is False provider._ws = WebSocketMessageStreamMock( raise_exception=WSException("test exception") ) with pytest.raises(WSException, match="test exception"): await provider._message_listener_task assert provider._message_listener_task.done() @pytest.mark.asyncio async def test_msg_listener_task_silences_exceptions_and_error_logs_when_configured( caplog, ): provider = WebSocketProvider("ws://mocked", silence_listener_task_exceptions=True) _mock_ws(provider) with patch( "web3.providers.persistent.websocket.connect", new=lambda *_1, **_2: _mocked_ws_conn(), ): await provider.connect() assert provider._message_listener_task is not None assert provider.silence_listener_task_exceptions is True provider._ws = WebSocketMessageStreamMock( raise_exception=WSException("test exception") ) await asyncio.sleep(0.05) assert "test exception" in caplog.text assert ( "Exception caught in listener, error logging and keeping listener background " "task alive.\n error=WSException: test exception" ) in caplog.text # assert is still running assert not provider._message_listener_task.cancelled() # proper cleanup await provider.disconnect() @pytest.mark.asyncio async def 
test_listen_event_awaits_msg_processing_when_subscription_queue_is_full(): """ This test is to ensure that the `listen_event` method will wait for the `process_subscriptions` method to process a message when the subscription queue is full. """ with patch( "web3.providers.persistent.websocket.connect", new=lambda *_1, **_2: _mocked_ws_conn(), ): async_w3 = await AsyncWeb3(WebSocketProvider("ws://mocked")) _mock_ws(async_w3.provider) assert async_w3.provider._message_listener_task is not None assert not async_w3.provider._message_listener_task.cancelled() # assert queue is instance of asyncio.Queue and replace with a new queue, maxsize=1 assert isinstance( async_w3.provider._request_processor._subscription_response_queue, type(asyncio.Queue()), ) async_w3.provider._request_processor._subscription_response_queue = asyncio.Queue( maxsize=1 ) assert not async_w3.provider._request_processor._subscription_response_queue.full() # mock listen event async_w3.provider._listen_event.wait = AsyncMock() async_w3.provider._listen_event.set = Mock() # mock subscription and add to active subscriptions sub_id = "0x1" sub_request_information = RequestInformation( method=RPCEndpoint("eth_subscribe"), params=["mock"], response_formatters=[[], [], []], subscription_id=sub_id, ) async_w3.provider._request_processor._request_information_cache.cache( generate_cache_key(sub_id), sub_request_information, ) sub = EthSubscription() sub._id = sub_id async_w3.subscription_manager._add_subscription(sub) mocked_sub = { "jsonrpc": "2.0", "method": "eth_subscription", "params": {"subscription": sub_id, "result": "0x1337"}, } # fill queue with one item so it is full async_w3.provider._request_processor._subscription_response_queue.put_nowait( mocked_sub ) assert async_w3.provider._request_processor._subscription_response_queue.full() # wait will be called on the _listen_event when the next message comes in async_w3.provider._listen_event.wait.assert_not_called() # mock the message stream with a 
single message # the message listener task should then call the _listen_event.wait since the # queue is full async_w3.provider._ws = WebSocketMessageStreamMock( messages=[to_bytes(text=json.dumps(mocked_sub))] ) await asyncio.sleep(0.05) async_w3.provider._listen_event.wait.assert_called_once() # set is not called until we start consuming messages async_w3.provider._listen_event.set.assert_not_called() async for message in async_w3.socket.process_subscriptions(): # assert the very next message is the formatted mocked subscription assert message == mocked_sub["params"] break # assert we set the _listen_event after we consume the message async_w3.provider._listen_event.set.assert_called_once() # proper cleanup await async_w3.provider.disconnect() @pytest.mark.asyncio async def test_async_iterator_pattern_exception_handling_for_requests(): iterations = 1 with patch( "web3.providers.persistent.websocket.connect", new=lambda *_1, **_2: WebSocketMessageStreamMock( raise_exception=ConnectionClosed(None, None) ), ): async for w3 in AsyncWeb3(WebSocketProvider("ws://mocked")): try: await w3.eth.block_number except ConnectionClosed: if iterations == 3: break else: iterations += 1 continue pytest.fail("Expected `ConnectionClosed` exception.") assert iterations == 3 @pytest.mark.asyncio async def test_async_iterator_pattern_exception_handling_for_subscriptions(): iterations = 1 with patch( "web3.providers.persistent.websocket.connect", new=lambda *_1, **_2: WebSocketMessageStreamMock( raise_exception=ConnectionClosed(None, None) ), ): async for w3 in AsyncWeb3(WebSocketProvider("ws://mocked")): try: async for _ in w3.socket.process_subscriptions(): # raises exception pass except ConnectionClosed: if iterations == 3: break else: iterations += 1 continue pytest.fail("Expected `ConnectionClosed` exception.") assert iterations == 3 @pytest.mark.asyncio async def test_connection_closed_ok_breaks_process_subscriptions_iteration(): with patch( 
"web3.providers.persistent.websocket.connect", new=lambda *_1, **_2: WebSocketMessageStreamMock( raise_exception=ConnectionClosedOK(None, None) ), ): w3 = await AsyncWeb3(WebSocketProvider("ws://mocked")) async for _ in w3.socket.process_subscriptions(): pytest.fail("Should not reach this point.") @pytest.mark.asyncio async def test_connection_closed_ok_breaks_handle_subscriptions_iteration(): with patch( "web3.providers.persistent.websocket.connect", new=lambda *_1, **_2: WebSocketMessageStreamMock( raise_exception=ConnectionClosedOK(None, None) ), ): w3 = await AsyncWeb3(WebSocketProvider("ws://mocked")) # would fail with a ``TimeoutError`` if the iteration did not break properly # on ``ConnectionClosedOK`` await asyncio.wait_for( w3.subscription_manager.handle_subscriptions(run_forever=True), timeout=1 ) @pytest.mark.asyncio async def test_listener_task_breaks_out_of_stream_when_cancelled(): with patch( "web3.providers.persistent.websocket.connect", new=lambda *_1, **_2: _mocked_ws_conn(), ): async_w3 = await AsyncWeb3(WebSocketProvider("ws://mocked")) async_w3.provider._message_listener_task.cancel() sub = EthSubscription() sub._id = "0x1" async_w3.subscription_manager._add_subscription(sub) # this should hang indefinitely if the listener task does not put a # ``TaskNotRunning`` in the ``_subscription_response_queue`` to break out of # listening. The call to ``provider._handle_listener_task_exceptions`` bubbles up # the exception. with pytest.raises(asyncio.CancelledError): async for _ in async_w3.socket.process_subscriptions(): ... 
@pytest.mark.asyncio async def test_listener_task_breaks_out_of_handle_subscriptions_when_cancelled(): with patch( "web3.providers.persistent.websocket.connect", new=lambda *_1, **_2: _mocked_ws_conn(), ): async_w3 = await AsyncWeb3(WebSocketProvider("ws://mocked")) async_w3.provider._message_listener_task.cancel() sub = EthSubscription(handler=AsyncMock()) sub._id = "0x1" async_w3.subscription_manager._add_subscription(sub) # this should hang indefinitely if the listener task does not put a # ``TaskNotRunning`` in the ``_handler_subscription_queue`` to break out of # listening. The call to ``provider._handle_listener_task_exceptions`` bubbles # up the exception. with pytest.raises(asyncio.CancelledError): await async_w3.subscription_manager.handle_subscriptions(run_forever=True) @pytest.mark.asyncio async def test_persistent_connection_provider_empty_batch_response(): with patch( "web3.providers.persistent.websocket.connect", new=lambda *_1, **_2: _mocked_ws_conn(), ): with pytest.raises(Web3RPCError, match="empty batch"): async with AsyncWeb3(WebSocketProvider("ws://mocked")) as async_w3: async with async_w3.batch_requests() as batch: assert async_w3.provider._is_batching async_w3.provider._ws.recv = AsyncMock() async_w3.provider._ws.recv.return_value = ( b'{"jsonrpc": "2.0","id":null,"error": {"code": -32600, ' b'"message": "empty batch"}}\n' ) await batch.async_execute() # assert that even though there was an error, we have reset the batching # state assert not async_w3.provider._is_batching @pytest.mark.parametrize( "use_text_frames, expected_send_arg", ( (False, to_bytes(text=json.dumps(GET_BLOCK_JSON_MESSAGE))), (True, json.dumps(GET_BLOCK_JSON_MESSAGE)), ), ) @pytest.mark.asyncio async def test_websocket_provider_use_text_frames(use_text_frames, expected_send_arg): provider = WebSocketProvider("ws://mocked", use_text_frames=use_text_frames) assert provider.use_text_frames is use_text_frames # mock provider and add a mocked response to the cache 
_mock_ws(provider) provider._ws.send = AsyncMock() provider._request_processor._request_response_cache.cache( generate_cache_key(0), "0x1337" ) await provider.make_request(RPCEndpoint("eth_getBlockByNumber"), ["latest", False]) provider._ws.send.assert_called_once_with(expected_send_arg) @pytest.mark.asyncio async def test_websocket_provider_raises_errors_from_cache_not_tied_to_a_request(): with patch( "web3.providers.persistent.websocket.connect", new=lambda *_1, **_2: WebSocketMessageStreamMock( messages=[ b'{"id": 0, "jsonrpc": "2.0", "result": "0x0"}\n', b'{"id": null, "jsonrpc": "2.0", "error": {"code": 21, "message": "Request shutdown"}}\n', # noqa: E501 ] ), ): async_w3 = await AsyncWeb3(WebSocketProvider("ws://mocked")) with pytest.raises(Web3RPCError, match="Request shutdown"): await asyncio.sleep(0.1) await async_w3.eth.block_number @pytest.mark.asyncio async def test_req_info_cache_size_can_be_set_and_warns_when_full(caplog): with patch( "web3.providers.persistent.websocket.connect", new=lambda *_1, **_2: _mocked_ws_conn(), ): async_w3 = await AsyncWeb3( WebSocketProvider("ws://mocked", request_information_cache_size=1) ) async_w3.provider._request_processor.cache_request_information( RPCEndpoint("eth_getBlockByNumber"), ["latest"], tuple(), tuple(), ) assert len(async_w3.provider._request_processor._request_information_cache) == 1 assert ( "Request information cache is full. This may result in unexpected " "behavior. Consider increasing the ``request_information_cache_size`` " "on the provider." ) in caplog.text @pytest.mark.asyncio async def test_raise_stray_errors_from_cache_handles_list_response_without_error(): provider = WebSocketProvider("ws://mocked") _mock_ws(provider) bad_response = [ {"id": None, "jsonrpc": "2.0", "error": {"code": 21, "message": "oops"}} ] provider._request_processor._request_response_cache._data["bad_key"] = bad_response # assert no errors raised provider._raise_stray_errors_from_cache()
WSException
python
pytorch__pytorch
torch/_inductor/compile_fx_ext.py
{ "start": 6874, "end": 7313 }
class ____: value: bytes def deserialize(self) -> _WireProtocolInput: """ Turn this streamable object back into a _WireProtocolInput. """ from torch.fx._graph_pickler import GraphPickler fake_mode = _current_fake_mode() result = GraphPickler.loads(self.value, fake_mode) assert isinstance(result, _WireProtocolInput) return result @dataclass
_WireProtocolPickledInput
python
networkx__networkx
networkx/algorithms/tests/test_clique.py
{ "start": 101, "end": 7547 }
class ____: def setup_method(self): z = [3, 4, 3, 4, 2, 4, 2, 1, 1, 1, 1] self.G = cnlti(nx.generators.havel_hakimi_graph(z), first_label=1) self.cl = list(nx.find_cliques(self.G)) H = nx.complete_graph(6) H = nx.relabel_nodes(H, {i: i + 1 for i in range(6)}) H.remove_edges_from([(2, 6), (2, 5), (2, 4), (1, 3), (5, 3)]) self.H = H def test_find_cliques1(self): cl = list(nx.find_cliques(self.G)) rcl = nx.find_cliques_recursive(self.G) expected = [[2, 6, 1, 3], [2, 6, 4], [5, 4, 7], [8, 9], [10, 11]] assert sorted(map(sorted, cl)) == sorted(map(sorted, rcl)) assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) def test_selfloops(self): self.G.add_edge(1, 1) cl = list(nx.find_cliques(self.G)) rcl = list(nx.find_cliques_recursive(self.G)) assert set(map(frozenset, cl)) == set(map(frozenset, rcl)) answer = [{2, 6, 1, 3}, {2, 6, 4}, {5, 4, 7}, {8, 9}, {10, 11}] assert len(answer) == len(cl) assert all(set(c) in answer for c in cl) def test_find_cliques2(self): hcl = list(nx.find_cliques(self.H)) assert sorted(map(sorted, hcl)) == [[1, 2], [1, 4, 5, 6], [2, 3], [3, 4, 6]] def test_find_cliques3(self): # all cliques are [[2, 6, 1, 3], [2, 6, 4], [5, 4, 7], [8, 9], [10, 11]] cl = list(nx.find_cliques(self.G, [2])) rcl = nx.find_cliques_recursive(self.G, [2]) expected = [[2, 6, 1, 3], [2, 6, 4]] assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) cl = list(nx.find_cliques(self.G, [2, 3])) rcl = nx.find_cliques_recursive(self.G, [2, 3]) expected = [[2, 6, 1, 3]] assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) cl = list(nx.find_cliques(self.G, [2, 6, 4])) rcl = nx.find_cliques_recursive(self.G, [2, 6, 4]) expected = [[2, 6, 4]] assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) cl = list(nx.find_cliques(self.G, [2, 6, 4])) rcl = 
nx.find_cliques_recursive(self.G, [2, 6, 4]) expected = [[2, 6, 4]] assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) with pytest.raises(ValueError): list(nx.find_cliques(self.G, [2, 6, 4, 1])) with pytest.raises(ValueError): list(nx.find_cliques_recursive(self.G, [2, 6, 4, 1])) def test_find_cliques_directed(self): G = nx.path_graph(4, create_using=nx.DiGraph) msg = "not implemented for directed" with pytest.raises(nx.NetworkXNotImplemented, match=msg): list(nx.find_cliques(G)) with pytest.raises(nx.NetworkXNotImplemented, match=msg): list(nx.find_cliques_recursive(G)) def test_number_of_cliques(self): G = self.G assert nx.number_of_cliques(G, 1) == 1 assert list(nx.number_of_cliques(G, [1]).values()) == [1] assert list(nx.number_of_cliques(G, [1, 2]).values()) == [1, 2] assert nx.number_of_cliques(G, [1, 2]) == {1: 1, 2: 2} assert nx.number_of_cliques(G, 2) == 2 assert nx.number_of_cliques(G) == { 1: 1, 2: 2, 3: 1, 4: 2, 5: 1, 6: 2, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1, } assert nx.number_of_cliques(G, nodes=list(G)) == { 1: 1, 2: 2, 3: 1, 4: 2, 5: 1, 6: 2, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1, } assert nx.number_of_cliques(G, nodes=[2, 3, 4]) == {2: 2, 3: 1, 4: 2} assert nx.number_of_cliques(G, cliques=self.cl) == { 1: 1, 2: 2, 3: 1, 4: 2, 5: 1, 6: 2, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1, } assert nx.number_of_cliques(G, list(G), cliques=self.cl) == { 1: 1, 2: 2, 3: 1, 4: 2, 5: 1, 6: 2, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1, } def test_node_clique_number(self): G = self.G assert nx.node_clique_number(G, 1) == 4 assert list(nx.node_clique_number(G, [1]).values()) == [4] assert list(nx.node_clique_number(G, [1, 2]).values()) == [4, 4] assert nx.node_clique_number(G, [1, 2]) == {1: 4, 2: 4} assert nx.node_clique_number(G, 1) == 4 assert nx.node_clique_number(G) == { 1: 4, 2: 4, 3: 4, 4: 3, 5: 3, 6: 4, 7: 3, 8: 2, 9: 2, 10: 2, 11: 2, } assert nx.node_clique_number(G, cliques=self.cl) == { 1: 4, 2: 4, 3: 4, 4: 3, 
5: 3, 6: 4, 7: 3, 8: 2, 9: 2, 10: 2, 11: 2, } assert nx.node_clique_number(G, [1, 2], cliques=self.cl) == {1: 4, 2: 4} assert nx.node_clique_number(G, 1, cliques=self.cl) == 4 def test_make_clique_bipartite(self): G = self.G B = nx.make_clique_bipartite(G) assert sorted(B) == [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] # Project onto the nodes of the original graph. H = nx.projected_graph(B, range(1, 12)) assert H.adj == G.adj # Project onto the nodes representing the cliques. H1 = nx.projected_graph(B, range(-5, 0)) # Relabel the negative numbers as positive ones. H1 = nx.relabel_nodes(H1, {-v: v for v in range(1, 6)}) assert sorted(H1) == [1, 2, 3, 4, 5] def test_make_max_clique_graph(self): """Tests that the maximal clique graph is the same as the bipartite clique graph after being projected onto the nodes representing the cliques. """ G = self.G B = nx.make_clique_bipartite(G) # Project onto the nodes representing the cliques. H1 = nx.projected_graph(B, range(-5, 0)) # Relabel the negative numbers as nonnegative ones, starting at # 0. H1 = nx.relabel_nodes(H1, {-v: v - 1 for v in range(1, 6)}) H2 = nx.make_max_clique_graph(G) assert H1.adj == H2.adj def test_directed(self): with pytest.raises(nx.NetworkXNotImplemented): next(nx.find_cliques(nx.DiGraph())) def test_find_cliques_trivial(self): G = nx.Graph() assert sorted(nx.find_cliques(G)) == [] assert sorted(nx.find_cliques_recursive(G)) == [] def test_make_max_clique_graph_create_using(self): G = nx.Graph([(1, 2), (3, 1), (4, 1), (5, 6)]) E = nx.Graph([(0, 1), (0, 2), (1, 2)]) E.add_node(3) assert nx.is_isomorphic(nx.make_max_clique_graph(G, create_using=nx.Graph), E)
TestCliques
python
sympy__sympy
sympy/vector/dyadic.py
{ "start": 5689, "end": 7167 }
class ____(Dyadic, AtomicExpr): """ Class to denote a base dyadic tensor component. """ def __new__(cls, vector1, vector2): Vector = sympy.vector.Vector BaseVector = sympy.vector.BaseVector VectorZero = sympy.vector.VectorZero # Verify arguments if not isinstance(vector1, (BaseVector, VectorZero)) or \ not isinstance(vector2, (BaseVector, VectorZero)): raise TypeError("BaseDyadic cannot be composed of non-base " + "vectors") # Handle special case of zero vector elif vector1 == Vector.zero or vector2 == Vector.zero: return Dyadic.zero # Initialize instance obj = super().__new__(cls, vector1, vector2) obj._base_instance = obj obj._measure_number = 1 obj._components = {obj: S.One} obj._sys = vector1._sys obj._pretty_form = ('(' + vector1._pretty_form + '|' + vector2._pretty_form + ')') obj._latex_form = (r'\left(' + vector1._latex_form + r"{\middle|}" + vector2._latex_form + r'\right)') return obj def _sympystr(self, printer): return "({}|{})".format( printer._print(self.args[0]), printer._print(self.args[1])) def _sympyrepr(self, printer): return "BaseDyadic({}, {})".format( printer._print(self.args[0]), printer._print(self.args[1]))
BaseDyadic
python
nedbat__coveragepy
tests/test_html.py
{ "start": 55959, "end": 57008 }
class ____(HtmlTestHelpers, CoverageTest): """Tests of the helpers in HtmlTestHelpers.""" def test_bad_link(self) -> None: # Does assert_valid_hrefs detect links to non-existent files? self.make_file("htmlcov/index.html", "<a href='nothing.html'>Nothing</a>") msg = "These files link to 'nothing.html', which doesn't exist: htmlcov.index.html" with pytest.raises(AssertionError, match=msg): self.assert_valid_hrefs() def test_bad_anchor(self) -> None: # Does assert_valid_hrefs detect fragments that go nowhere? self.make_file("htmlcov/index.html", "<a href='#nothing'>Nothing</a>") msg = "Fragment '#nothing' in htmlcov.index.html has no anchor" with pytest.raises(AssertionError, match=msg): self.assert_valid_hrefs() @pytest.mark.parametrize( "n, key", [ (0, "a"), (1, "b"), (999999999, "e9S_p"), ], ) def test_encode_int(n: int, key: str) -> None: assert coverage.html.encode_int(n) == key
HtmlHelpersTest
python
astropy__astropy
astropy/coordinates/builtin_frames/equatorial.py
{ "start": 3843, "end": 4865 }
class ____(BaseCoordinateFrame): """ A coordinate or frame in the True Equator Mean Equinox frame (TEME). This frame is a geocentric system similar to CIRS or geocentric apparent place, except that the mean sidereal time is used to rotate from TIRS. TEME coordinates are most often used in combination with orbital data for satellites in the two-line-ephemeris format. Different implementations of the TEME frame exist. For clarity, this frame follows the conventions and relations to other frames that are set out in Vallado et al (2006). For more background on TEME, see the references provided in the :ref:`astropy:astropy-coordinates-seealso` section of the documentation. """ default_representation = CartesianRepresentation default_differential = CartesianDifferential obstime = TimeAttribute(doc="The reference time (e.g., time of observation)") # Transformation functions for getting to/from TEME and ITRS are in # intermediate rotation transforms.py
TEME
python
django__django
tests/admin_inlines/models.py
{ "start": 8608, "end": 8731 }
class ____(models.Model): name = models.CharField(max_length=128) def __str__(self): return self.name
Course
python
tensorflow__tensorflow
tensorflow/python/training/basic_session_run_hooks_test.py
{ "start": 57464, "end": 61949 }
class ____(test.TestCase): def setUp(self): super(ProfilerHookTest, self).setUp() self.output_dir = tempfile.mkdtemp() self.graph = ops.Graph() self.filepattern = os.path.join(self.output_dir, 'timeline-*.json') with self.graph.as_default(): self.global_step = training_util.get_or_create_global_step() self.train_op = state_ops.assign_add(self.global_step, 1) def tearDown(self): super(ProfilerHookTest, self).tearDown() shutil.rmtree(self.output_dir, ignore_errors=True) def _count_timeline_files(self): return len(gfile.Glob(self.filepattern)) @test_util.run_deprecated_v1 def test_raise_in_both_secs_and_steps(self): with self.assertRaises(ValueError): basic_session_run_hooks.ProfilerHook(save_secs=10, save_steps=20) @test_util.run_deprecated_v1 def test_raise_in_none_secs_and_steps(self): with self.assertRaises(ValueError): basic_session_run_hooks.ProfilerHook(save_secs=None, save_steps=None) def test_save_secs_does_not_save_in_first_step(self): with self.graph.as_default(): hook = basic_session_run_hooks.ProfilerHook( save_secs=2, output_dir=self.output_dir) with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess: sess.run(self.train_op) self.assertEqual(0, self._count_timeline_files()) @test.mock.patch.object(time, 'time') def test_save_secs_saves_periodically(self, mock_time): # Pick a fixed start time. with self.graph.as_default(): mock_time.return_value = MOCK_START_TIME hook = basic_session_run_hooks.ProfilerHook( save_secs=2, output_dir=self.output_dir) with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess: sess.run(self.train_op) # Not saved. self.assertEqual(0, self._count_timeline_files()) # Simulate 2.5 seconds of sleep. mock_time.return_value = MOCK_START_TIME + 2.5 sess.run(self.train_op) # Saved. self.assertEqual(1, self._count_timeline_files()) # Pretend some small amount of time has passed. mock_time.return_value = MOCK_START_TIME + 2.6 sess.run(self.train_op) # Not saved. 
# Edge test just before we should save the timeline. mock_time.return_value = MOCK_START_TIME + 4.4 sess.run(self.train_op) # Not saved. self.assertEqual(1, self._count_timeline_files()) mock_time.return_value = MOCK_START_TIME + 4.5 sess.run(self.train_op) # Saved. self.assertEqual(2, self._count_timeline_files()) def test_save_steps_does_not_save_in_first_step(self): with self.graph.as_default(): hook = basic_session_run_hooks.ProfilerHook( save_steps=1, output_dir=self.output_dir) with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess: sess.run(self.train_op) # Not saved. self.assertEqual(0, self._count_timeline_files()) def test_save_steps_saves_periodically(self): with self.graph.as_default(): hook = basic_session_run_hooks.ProfilerHook( save_steps=2, output_dir=self.output_dir) with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess: self.assertEqual(0, self._count_timeline_files()) sess.run(self.train_op) # Not saved. self.assertEqual(0, self._count_timeline_files()) sess.run(self.train_op) # Saved. self.assertEqual(1, self._count_timeline_files()) sess.run(self.train_op) # Not saved. self.assertEqual(1, self._count_timeline_files()) sess.run(self.train_op) # Saved. self.assertEqual(2, self._count_timeline_files()) sess.run(self.train_op) # Not saved. self.assertEqual(2, self._count_timeline_files()) def test_run_metadata_saves(self): writer_cache.FileWriterCache.clear() fake_summary_writer.FakeSummaryWriter.install() fake_writer = writer_cache.FileWriterCache.get(self.output_dir) with self.graph.as_default(): hook = basic_session_run_hooks.ProfilerHook( save_steps=1, output_dir=self.output_dir) with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess: sess.run(self.train_op) # Not saved. sess.run(self.train_op) # Saved. self.assertEqual( list(fake_writer._added_run_metadata.keys()), ['step_2']) fake_summary_writer.FakeSummaryWriter.uninstall() if __name__ == '__main__': test.main()
ProfilerHookTest
python
RaRe-Technologies__gensim
gensim/test/test_word2vec.py
{ "start": 1523, "end": 53068 }
class ____(unittest.TestCase): def test_build_vocab_from_freq(self): """Test that the algorithm is able to build vocabulary from given frequency table""" freq_dict = { 'minors': 2, 'graph': 3, 'system': 4, 'trees': 3, 'eps': 2, 'computer': 2, 'survey': 2, 'user': 3, 'human': 2, 'time': 2, 'interface': 2, 'response': 2 } freq_dict_orig = freq_dict.copy() model_hs = word2vec.Word2Vec(vector_size=10, min_count=0, seed=42, hs=1, negative=0) model_neg = word2vec.Word2Vec(vector_size=10, min_count=0, seed=42, hs=0, negative=5) model_hs.build_vocab_from_freq(freq_dict) model_neg.build_vocab_from_freq(freq_dict) self.assertEqual(len(model_hs.wv), 12) self.assertEqual(len(model_neg.wv), 12) for k in freq_dict_orig.keys(): self.assertEqual(model_hs.wv.get_vecattr(k, 'count'), freq_dict_orig[k]) self.assertEqual(model_neg.wv.get_vecattr(k, 'count'), freq_dict_orig[k]) new_freq_dict = { 'computer': 1, 'artificial': 4, 'human': 1, 'graph': 1, 'intelligence': 4, 'system': 1, 'trees': 1 } model_hs.build_vocab_from_freq(new_freq_dict, update=True) model_neg.build_vocab_from_freq(new_freq_dict, update=True) self.assertEqual(model_hs.wv.get_vecattr('graph', 'count'), 4) self.assertEqual(model_hs.wv.get_vecattr('artificial', 'count'), 4) self.assertEqual(len(model_hs.wv), 14) self.assertEqual(len(model_neg.wv), 14) def test_prune_vocab(self): """Test Prune vocab while scanning sentences""" sentences = [ ["graph", "system"], ["graph", "system"], ["system", "eps"], ["graph", "system"] ] model = word2vec.Word2Vec(sentences, vector_size=10, min_count=0, max_vocab_size=2, seed=42, hs=1, negative=0) self.assertEqual(len(model.wv), 2) self.assertEqual(model.wv.get_vecattr('graph', 'count'), 3) self.assertEqual(model.wv.get_vecattr('system', 'count'), 4) sentences = [ ["graph", "system"], ["graph", "system"], ["system", "eps"], ["graph", "system"], ["minors", "survey", "minors", "survey", "minors"] ] model = word2vec.Word2Vec(sentences, vector_size=10, min_count=0, max_vocab_size=2, seed=42, 
hs=1, negative=0) self.assertEqual(len(model.wv), 3) self.assertEqual(model.wv.get_vecattr('graph', 'count'), 3) self.assertEqual(model.wv.get_vecattr('minors', 'count'), 3) self.assertEqual(model.wv.get_vecattr('system', 'count'), 4) def test_total_word_count(self): model = word2vec.Word2Vec(vector_size=10, min_count=0, seed=42) total_words = model.scan_vocab(sentences)[0] self.assertEqual(total_words, 29) def test_max_final_vocab(self): # Test for less restricting effect of max_final_vocab # max_final_vocab is specified but has no effect model = word2vec.Word2Vec(vector_size=10, max_final_vocab=4, min_count=4, sample=0) model.scan_vocab(sentences) reported_values = model.prepare_vocab() self.assertEqual(reported_values['drop_unique'], 11) self.assertEqual(reported_values['retain_total'], 4) self.assertEqual(reported_values['num_retained_words'], 1) self.assertEqual(model.effective_min_count, 4) # Test for more restricting effect of max_final_vocab # results in setting a min_count more restricting than specified min_count model = word2vec.Word2Vec(vector_size=10, max_final_vocab=4, min_count=2, sample=0) model.scan_vocab(sentences) reported_values = model.prepare_vocab() self.assertEqual(reported_values['drop_unique'], 8) self.assertEqual(reported_values['retain_total'], 13) self.assertEqual(reported_values['num_retained_words'], 4) self.assertEqual(model.effective_min_count, 3) def test_online_learning(self): """Test that the algorithm is able to add new words to the vocabulary and to a trained model when using a sorted vocabulary""" model_hs = word2vec.Word2Vec(sentences, vector_size=10, min_count=0, seed=42, hs=1, negative=0) model_neg = word2vec.Word2Vec(sentences, vector_size=10, min_count=0, seed=42, hs=0, negative=5) self.assertTrue(len(model_hs.wv), 12) self.assertTrue(model_hs.wv.get_vecattr('graph', 'count'), 3) model_hs.build_vocab(new_sentences, update=True) model_neg.build_vocab(new_sentences, update=True) 
self.assertTrue(model_hs.wv.get_vecattr('graph', 'count'), 4) self.assertTrue(model_hs.wv.get_vecattr('artificial', 'count'), 4) self.assertEqual(len(model_hs.wv), 14) self.assertEqual(len(model_neg.wv), 14) def test_online_learning_after_save(self): """Test that the algorithm is able to add new words to the vocabulary and to a trained model when using a sorted vocabulary""" tmpf = get_tmpfile('gensim_word2vec.tst') model_neg = word2vec.Word2Vec(sentences, vector_size=10, min_count=0, seed=42, hs=0, negative=5) model_neg.save(tmpf) model_neg = word2vec.Word2Vec.load(tmpf) self.assertTrue(len(model_neg.wv), 12) model_neg.build_vocab(new_sentences, update=True) model_neg.train(new_sentences, total_examples=model_neg.corpus_count, epochs=model_neg.epochs) self.assertEqual(len(model_neg.wv), 14) def test_online_learning_from_file(self): """Test that the algorithm is able to add new words to the vocabulary and to a trained model when using a sorted vocabulary""" with temporary_file(get_tmpfile('gensim_word2vec1.tst')) as corpus_file, \ temporary_file(get_tmpfile('gensim_word2vec2.tst')) as new_corpus_file: utils.save_as_line_sentence(sentences, corpus_file) utils.save_as_line_sentence(new_sentences, new_corpus_file) model_hs = word2vec.Word2Vec(corpus_file=corpus_file, vector_size=10, min_count=0, seed=42, hs=1, negative=0) model_neg = word2vec.Word2Vec(corpus_file=corpus_file, vector_size=10, min_count=0, seed=42, hs=0, negative=5) self.assertTrue(len(model_hs.wv), 12) self.assertTrue(model_hs.wv.get_vecattr('graph', 'count'), 3) model_hs.build_vocab(corpus_file=new_corpus_file, update=True) model_hs.train(corpus_file=new_corpus_file, total_words=model_hs.corpus_total_words, epochs=model_hs.epochs) model_neg.build_vocab(corpus_file=new_corpus_file, update=True) model_neg.train( corpus_file=new_corpus_file, total_words=model_hs.corpus_total_words, epochs=model_hs.epochs) self.assertTrue(model_hs.wv.get_vecattr('graph', 'count'), 4) 
self.assertTrue(model_hs.wv.get_vecattr('artificial', 'count'), 4) self.assertEqual(len(model_hs.wv), 14) self.assertEqual(len(model_neg.wv), 14) def test_online_learning_after_save_from_file(self): """Test that the algorithm is able to add new words to the vocabulary and to a trained model when using a sorted vocabulary""" with temporary_file(get_tmpfile('gensim_word2vec1.tst')) as corpus_file, \ temporary_file(get_tmpfile('gensim_word2vec2.tst')) as new_corpus_file: utils.save_as_line_sentence(sentences, corpus_file) utils.save_as_line_sentence(new_sentences, new_corpus_file) tmpf = get_tmpfile('gensim_word2vec.tst') model_neg = word2vec.Word2Vec(corpus_file=corpus_file, vector_size=10, min_count=0, seed=42, hs=0, negative=5) model_neg.save(tmpf) model_neg = word2vec.Word2Vec.load(tmpf) self.assertTrue(len(model_neg.wv), 12) # Check that training works on the same data after load without calling build_vocab model_neg.train(corpus_file=corpus_file, total_words=model_neg.corpus_total_words, epochs=model_neg.epochs) # Train on new corpus file model_neg.build_vocab(corpus_file=new_corpus_file, update=True) model_neg.train(corpus_file=new_corpus_file, total_words=model_neg.corpus_total_words, epochs=model_neg.epochs) self.assertEqual(len(model_neg.wv), 14) def onlineSanity(self, model, trained_model=False): terro, others = [], [] for line in lee_corpus_list: if 'terrorism' in line: terro.append(line) else: others.append(line) self.assertTrue(all('terrorism' not in line for line in others)) model.build_vocab(others, update=trained_model) model.train(others, total_examples=model.corpus_count, epochs=model.epochs) self.assertFalse('terrorism' in model.wv) model.build_vocab(terro, update=True) self.assertTrue('terrorism' in model.wv) orig0 = np.copy(model.wv.vectors) model.train(terro, total_examples=len(terro), epochs=model.epochs) self.assertFalse(np.allclose(model.wv.vectors, orig0)) sim = model.wv.n_similarity(['war'], ['terrorism']) self.assertLess(0., sim) def 
test_sg_hs_online(self): """Test skipgram w/ hierarchical softmax""" model = word2vec.Word2Vec(sg=1, window=5, hs=1, negative=0, min_count=3, epochs=10, seed=42, workers=2) self.onlineSanity(model) def test_sg_neg_online(self): """Test skipgram w/ negative sampling""" model = word2vec.Word2Vec(sg=1, window=4, hs=0, negative=15, min_count=3, epochs=10, seed=42, workers=2) self.onlineSanity(model) def test_cbow_hs_online(self): """Test CBOW w/ hierarchical softmax""" model = word2vec.Word2Vec( sg=0, cbow_mean=1, alpha=0.05, window=5, hs=1, negative=0, min_count=3, epochs=20, seed=42, workers=2 ) self.onlineSanity(model) def test_cbow_neg_online(self): """Test CBOW w/ negative sampling""" model = word2vec.Word2Vec( sg=0, cbow_mean=1, alpha=0.05, window=5, hs=0, negative=15, min_count=5, epochs=10, seed=42, workers=2, sample=0 ) self.onlineSanity(model) def test_persistence(self): """Test storing/loading the entire model.""" tmpf = get_tmpfile('gensim_word2vec.tst') model = word2vec.Word2Vec(sentences, min_count=1) model.save(tmpf) self.models_equal(model, word2vec.Word2Vec.load(tmpf)) # test persistence of the KeyedVectors of a model wv = model.wv wv.save(tmpf) loaded_wv = keyedvectors.KeyedVectors.load(tmpf) self.assertTrue(np.allclose(wv.vectors, loaded_wv.vectors)) self.assertEqual(len(wv), len(loaded_wv)) def test_persistence_backwards_compatible(self): """Can we still load a model created with an older gensim version?""" path = datapath('model-from-gensim-3.8.0.w2v') model = word2vec.Word2Vec.load(path) x = model.score(['test']) assert x is not None def test_persistence_from_file(self): """Test storing/loading the entire model trained with corpus_file argument.""" with temporary_file(get_tmpfile('gensim_word2vec.tst')) as corpus_file: utils.save_as_line_sentence(sentences, corpus_file) tmpf = get_tmpfile('gensim_word2vec.tst') model = word2vec.Word2Vec(corpus_file=corpus_file, min_count=1) model.save(tmpf) self.models_equal(model, word2vec.Word2Vec.load(tmpf)) # 
test persistence of the KeyedVectors of a model wv = model.wv wv.save(tmpf) loaded_wv = keyedvectors.KeyedVectors.load(tmpf) self.assertTrue(np.allclose(wv.vectors, loaded_wv.vectors)) self.assertEqual(len(wv), len(loaded_wv)) def test_persistence_with_constructor_rule(self): """Test storing/loading the entire model with a vocab trimming rule passed in the constructor.""" tmpf = get_tmpfile('gensim_word2vec.tst') model = word2vec.Word2Vec(sentences, min_count=1, trim_rule=_rule) model.save(tmpf) self.models_equal(model, word2vec.Word2Vec.load(tmpf)) def test_rule_with_min_count(self): """Test that returning RULE_DEFAULT from trim_rule triggers min_count.""" model = word2vec.Word2Vec(sentences + [["occurs_only_once"]], min_count=2, trim_rule=_rule) self.assertTrue("human" not in model.wv) self.assertTrue("occurs_only_once" not in model.wv) self.assertTrue("interface" in model.wv) def test_rule(self): """Test applying vocab trim_rule to build_vocab instead of constructor.""" model = word2vec.Word2Vec(min_count=1) model.build_vocab(sentences, trim_rule=_rule) self.assertTrue("human" not in model.wv) def test_lambda_rule(self): """Test that lambda trim_rule works.""" def rule(word, count, min_count): return utils.RULE_DISCARD if word == "human" else utils.RULE_DEFAULT model = word2vec.Word2Vec(sentences, min_count=1, trim_rule=rule) self.assertTrue("human" not in model.wv) def obsolete_testLoadPreKeyedVectorModel(self): """Test loading pre-KeyedVectors word2vec model""" if sys.version_info[:2] == (3, 4): model_file_suffix = '_py3_4' elif sys.version_info < (3,): model_file_suffix = '_py2' else: model_file_suffix = '_py3' # Model stored in one file model_file = 'word2vec_pre_kv%s' % model_file_suffix model = word2vec.Word2Vec.load(datapath(model_file)) self.assertTrue(model.wv.vectors.shape == (len(model.wv), model.vector_size)) self.assertTrue(model.syn1neg.shape == (len(model.wv), model.vector_size)) # Model stored in multiple files model_file = 
'word2vec_pre_kv_sep%s' % model_file_suffix model = word2vec.Word2Vec.load(datapath(model_file)) self.assertTrue(model.wv.vectors.shape == (len(model.wv), model.vector_size)) self.assertTrue(model.syn1neg.shape == (len(model.wv), model.vector_size)) def test_load_pre_keyed_vector_model_c_format(self): """Test loading pre-KeyedVectors word2vec model saved in word2vec format""" model = keyedvectors.KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c')) self.assertTrue(model.vectors.shape[0] == len(model)) def test_persistence_word2vec_format(self): """Test storing/loading the entire model in word2vec format.""" tmpf = get_tmpfile('gensim_word2vec.tst') model = word2vec.Word2Vec(sentences, min_count=1) model.wv.save_word2vec_format(tmpf, binary=True) binary_model_kv = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, binary=True) self.assertTrue(np.allclose(model.wv['human'], binary_model_kv['human'])) norm_only_model = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, binary=True) norm_only_model.unit_normalize_all() self.assertFalse(np.allclose(model.wv['human'], norm_only_model['human'])) self.assertTrue(np.allclose(model.wv.get_vector('human', norm=True), norm_only_model['human'])) limited_model_kv = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, binary=True, limit=3) self.assertEqual(len(limited_model_kv.vectors), 3) half_precision_model_kv = keyedvectors.KeyedVectors.load_word2vec_format( tmpf, binary=True, datatype=np.float16 ) self.assertEqual(binary_model_kv.vectors.nbytes, half_precision_model_kv.vectors.nbytes * 2) def test_no_training_c_format(self): tmpf = get_tmpfile('gensim_word2vec.tst') model = word2vec.Word2Vec(sentences, min_count=1) model.wv.save_word2vec_format(tmpf, binary=True) kv = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, binary=True) binary_model = word2vec.Word2Vec() binary_model.wv = kv self.assertRaises(ValueError, binary_model.train, sentences) def test_too_short_binary_word2vec_format(self): tfile = 
get_tmpfile('gensim_word2vec.tst') model = word2vec.Word2Vec(sentences, min_count=1) model.wv.save_word2vec_format(tfile, binary=True) f = open(tfile, 'r+b') f.write(b'13') # write wrong (too-long) vector count f.close() self.assertRaises(EOFError, keyedvectors.KeyedVectors.load_word2vec_format, tfile, binary=True) def test_too_short_text_word2vec_format(self): tfile = get_tmpfile('gensim_word2vec.tst') model = word2vec.Word2Vec(sentences, min_count=1) model.wv.save_word2vec_format(tfile, binary=False) f = open(tfile, 'r+b') f.write(b'13') # write wrong (too-long) vector count f.close() self.assertRaises(EOFError, keyedvectors.KeyedVectors.load_word2vec_format, tfile, binary=False) def test_persistence_word2vec_format_non_binary(self): """Test storing/loading the entire model in word2vec non-binary format.""" tmpf = get_tmpfile('gensim_word2vec.tst') model = word2vec.Word2Vec(sentences, min_count=1) model.wv.save_word2vec_format(tmpf, binary=False) text_model = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, binary=False) self.assertTrue(np.allclose(model.wv['human'], text_model['human'], atol=1e-6)) norm_only_model = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, binary=False) norm_only_model.unit_normalize_all() self.assertFalse(np.allclose(model.wv['human'], norm_only_model['human'], atol=1e-6)) self.assertTrue(np.allclose( model.wv.get_vector('human', norm=True), norm_only_model['human'], atol=1e-4 )) def test_persistence_word2vec_format_with_vocab(self): """Test storing/loading the entire model and vocabulary in word2vec format.""" tmpf = get_tmpfile('gensim_word2vec.tst') model = word2vec.Word2Vec(sentences, min_count=1) testvocab = get_tmpfile('gensim_word2vec.vocab') model.wv.save_word2vec_format(tmpf, testvocab, binary=True) binary_model_with_vocab_kv = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, testvocab, binary=True) self.assertEqual( model.wv.get_vecattr('human', 'count'), binary_model_with_vocab_kv.get_vecattr('human', 'count'), ) 
def test_persistence_keyed_vectors_format_with_vocab(self): """Test storing/loading the entire model and vocabulary in word2vec format.""" tmpf = get_tmpfile('gensim_word2vec.tst') model = word2vec.Word2Vec(sentences, min_count=1) testvocab = get_tmpfile('gensim_word2vec.vocab') model.wv.save_word2vec_format(tmpf, testvocab, binary=True) kv_binary_model_with_vocab = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, testvocab, binary=True) self.assertEqual( model.wv.get_vecattr('human', 'count'), kv_binary_model_with_vocab.get_vecattr('human', 'count'), ) def test_persistence_word2vec_format_combination_with_standard_persistence(self): """Test storing/loading the entire model and vocabulary in word2vec format chained with saving and loading via `save` and `load` methods`. It was possible prior to 1.0.0 release, now raises Exception""" tmpf = get_tmpfile('gensim_word2vec.tst') model = word2vec.Word2Vec(sentences, min_count=1) testvocab = get_tmpfile('gensim_word2vec.vocab') model.wv.save_word2vec_format(tmpf, testvocab, binary=True) binary_model_with_vocab_kv = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, testvocab, binary=True) binary_model_with_vocab_kv.save(tmpf) self.assertRaises(AttributeError, word2vec.Word2Vec.load, tmpf) def test_large_mmap(self): """Test storing/loading the entire model.""" tmpf = get_tmpfile('gensim_word2vec.tst') model = word2vec.Word2Vec(sentences, min_count=1) # test storing the internal arrays into separate files model.save(tmpf, sep_limit=0) self.models_equal(model, word2vec.Word2Vec.load(tmpf)) # make sure mmaping the arrays back works, too self.models_equal(model, word2vec.Word2Vec.load(tmpf, mmap='r')) def test_vocab(self): """Test word2vec vocabulary building.""" corpus = LeeCorpus() total_words = sum(len(sentence) for sentence in corpus) # try vocab building explicitly, using all words model = word2vec.Word2Vec(min_count=1, hs=1, negative=0) model.build_vocab(corpus) self.assertTrue(len(model.wv) == 6981) # with 
min_count=1, we're not throwing away anything, # so make sure the word counts add up to be the entire corpus self.assertEqual(sum(model.wv.get_vecattr(k, 'count') for k in model.wv.key_to_index), total_words) # make sure the binary codes are correct np.allclose(model.wv.get_vecattr('the', 'code'), [1, 1, 0, 0]) # test building vocab with default params model = word2vec.Word2Vec(hs=1, negative=0) model.build_vocab(corpus) self.assertTrue(len(model.wv) == 1750) np.allclose(model.wv.get_vecattr('the', 'code'), [1, 1, 1, 0]) # no input => "RuntimeError: you must first build vocabulary before training the model" self.assertRaises(RuntimeError, word2vec.Word2Vec, []) # input not empty, but rather completely filtered out self.assertRaises(RuntimeError, word2vec.Word2Vec, corpus, min_count=total_words + 1) def test_training(self): """Test word2vec training.""" # build vocabulary, don't train yet model = word2vec.Word2Vec(vector_size=2, min_count=1, hs=1, negative=0) model.build_vocab(sentences) self.assertTrue(model.wv.vectors.shape == (len(model.wv), 2)) self.assertTrue(model.syn1.shape == (len(model.wv), 2)) model.train(sentences, total_examples=model.corpus_count, epochs=model.epochs) sims = model.wv.most_similar('graph', topn=10) # self.assertTrue(sims[0][0] == 'trees', sims) # most similar # test querying for "most similar" by vector graph_vector = model.wv.get_vector('graph', norm=True) sims2 = model.wv.most_similar(positive=[graph_vector], topn=11) sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself self.assertEqual(sims, sims2) # build vocab and train in one step; must be the same as above model2 = word2vec.Word2Vec(sentences, vector_size=2, min_count=1, hs=1, negative=0) self.models_equal(model, model2) def test_training_from_file(self): """Test word2vec training with corpus_file argument.""" # build vocabulary, don't train yet with temporary_file(get_tmpfile('gensim_word2vec.tst')) as tf: utils.save_as_line_sentence(sentences, tf) model 
= word2vec.Word2Vec(vector_size=2, min_count=1, hs=1, negative=0) model.build_vocab(corpus_file=tf) self.assertTrue(model.wv.vectors.shape == (len(model.wv), 2)) self.assertTrue(model.syn1.shape == (len(model.wv), 2)) model.train(corpus_file=tf, total_words=model.corpus_total_words, epochs=model.epochs) sims = model.wv.most_similar('graph', topn=10) # self.assertTrue(sims[0][0] == 'trees', sims) # most similar # test querying for "most similar" by vector graph_vector = model.wv.get_vector('graph', norm=True) sims2 = model.wv.most_similar(positive=[graph_vector], topn=11) sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself self.assertEqual(sims, sims2) def test_scoring(self): """Test word2vec scoring.""" model = word2vec.Word2Vec(sentences, vector_size=2, min_count=1, hs=1, negative=0) # just score and make sure they exist scores = model.score(sentences, len(sentences)) self.assertEqual(len(scores), len(sentences)) def test_locking(self): """Test word2vec training doesn't change locked vectors.""" corpus = LeeCorpus() # build vocabulary, don't train yet for sg in range(2): # test both cbow and sg model = word2vec.Word2Vec(vector_size=4, hs=1, negative=5, min_count=1, sg=sg, window=5) model.build_vocab(corpus) # remember two vectors locked0 = np.copy(model.wv.vectors[0]) unlocked1 = np.copy(model.wv.vectors[1]) # alocate a full lockf array (not just default single val for all) model.wv.vectors_lockf = np.ones(len(model.wv), dtype=np.float32) # lock the vector in slot 0 against change model.wv.vectors_lockf[0] = 0.0 model.train(corpus, total_examples=model.corpus_count, epochs=model.epochs) self.assertFalse((unlocked1 == model.wv.vectors[1]).all()) # unlocked vector should vary self.assertTrue((locked0 == model.wv.vectors[0]).all()) # locked vector should not vary def test_evaluate_word_analogies(self): """Test that evaluating analogies on KeyedVectors give sane results""" model = word2vec.Word2Vec(LeeCorpus()) score, sections = 
model.wv.evaluate_word_analogies(datapath('questions-words.txt')) score_cosmul, sections_cosmul = model.wv.evaluate_word_analogies( datapath('questions-words.txt'), similarity_function='most_similar_cosmul' ) self.assertEqual(score, score_cosmul) self.assertEqual(sections, sections_cosmul) self.assertGreaterEqual(score, 0.0) self.assertLessEqual(score, 1.0) self.assertGreater(len(sections), 0) # Check that dict contains the right keys first_section = sections[0] self.assertIn('section', first_section) self.assertIn('correct', first_section) self.assertIn('incorrect', first_section) def test_evaluate_word_pairs(self): """Test Spearman and Pearson correlation coefficients give sane results on similarity datasets""" corpus = word2vec.LineSentence(datapath('head500.noblanks.cor.bz2')) model = word2vec.Word2Vec(corpus, min_count=3, epochs=20) correlation = model.wv.evaluate_word_pairs(datapath('wordsim353.tsv')) pearson = correlation[0][0] spearman = correlation[1][0] oov = correlation[2] self.assertTrue(0.1 < pearson < 1.0, f"pearson {pearson} not between 0.1 & 1.0") self.assertTrue(0.1 < spearman < 1.0, f"spearman {spearman} not between 0.1 and 1.0") self.assertTrue(0.0 <= oov < 90.0, f"OOV {oov} not between 0.0 and 90.0") def test_evaluate_word_pairs_from_file(self): """Test Spearman and Pearson correlation coefficients give sane results on similarity datasets""" with temporary_file(get_tmpfile('gensim_word2vec.tst')) as tf: utils.save_as_line_sentence(word2vec.LineSentence(datapath('head500.noblanks.cor.bz2')), tf) model = word2vec.Word2Vec(corpus_file=tf, min_count=3, epochs=20) correlation = model.wv.evaluate_word_pairs(datapath('wordsim353.tsv')) pearson = correlation[0][0] spearman = correlation[1][0] oov = correlation[2] self.assertTrue(0.1 < pearson < 1.0, f"pearson {pearson} not between 0.1 & 1.0") self.assertTrue(0.1 < spearman < 1.0, f"spearman {spearman} not between 0.1 and 1.0") self.assertTrue(0.0 <= oov < 90.0, f"OOV {oov} not between 0.0 and 90.0") def 
model_sanity(self, model, train=True, with_corpus_file=False, ranks=None): """Even tiny models trained on LeeCorpus should pass these sanity checks""" # run extra before/after training tests if train=True if train: model.build_vocab(lee_corpus_list) orig0 = np.copy(model.wv.vectors[0]) if with_corpus_file: tmpfile = get_tmpfile('gensim_word2vec.tst') utils.save_as_line_sentence(lee_corpus_list, tmpfile) model.train(corpus_file=tmpfile, total_words=model.corpus_total_words, epochs=model.epochs) else: model.train(lee_corpus_list, total_examples=model.corpus_count, epochs=model.epochs) self.assertFalse((orig0 == model.wv.vectors[1]).all()) # vector should vary after training query_word = 'attacks' expected_word = 'bombings' sims = model.wv.most_similar(query_word, topn=len(model.wv.index_to_key)) t_rank = [word for word, score in sims].index(expected_word) # in >200 calibration runs w/ calling parameters, 'terrorism' in 50-most_sim for 'war' if ranks is not None: ranks.append(t_rank) # tabulate trial rank if requested self.assertLess(t_rank, 50) query_vec = model.wv[query_word] sims2 = model.wv.most_similar([query_vec], topn=51) self.assertTrue(query_word in [word for word, score in sims2]) self.assertTrue(expected_word in [word for word, score in sims2]) def test_sg_hs(self): """Test skipgram w/ hierarchical softmax""" model = word2vec.Word2Vec(sg=1, window=4, hs=1, negative=0, min_count=5, epochs=10, workers=2) self.model_sanity(model) def test_sg_hs_fromfile(self): model = word2vec.Word2Vec(sg=1, window=4, hs=1, negative=0, min_count=5, epochs=10, workers=2) self.model_sanity(model, with_corpus_file=True) def test_sg_neg(self): """Test skipgram w/ negative sampling""" model = word2vec.Word2Vec(sg=1, window=4, hs=0, negative=15, min_count=5, epochs=10, workers=2) self.model_sanity(model) def test_sg_neg_fromfile(self): model = word2vec.Word2Vec(sg=1, window=4, hs=0, negative=15, min_count=5, epochs=10, workers=2) self.model_sanity(model, with_corpus_file=True) 
@unittest.skipIf('BULK_TEST_REPS' not in os.environ, reason="bulk test only occasionally run locally") def test_method_in_bulk(self): """Not run by default testing, but can be run locally to help tune stochastic aspects of tests to very-very-rarely fail. EG: % BULK_TEST_REPS=200 METHOD_NAME=test_cbow_hs pytest test_word2vec.py -k "test_method_in_bulk" Method must accept `ranks` keyword-argument, empty list into which salient internal result can be reported. """ failures = 0 ranks = [] reps = int(os.environ['BULK_TEST_REPS']) method_name = os.environ.get('METHOD_NAME', 'test_cbow_hs') # by default test that specially-troublesome one method_fn = getattr(self, method_name) for i in range(reps): try: method_fn(ranks=ranks) except Exception as ex: print('%s failed: %s' % (method_name, ex)) failures += 1 print(ranks) print(np.mean(ranks)) self.assertEquals(failures, 0, "too many failures") def test_cbow_hs(self, ranks=None): """Test CBOW w/ hierarchical softmax""" model = word2vec.Word2Vec( sg=0, cbow_mean=1, alpha=0.1, window=2, hs=1, negative=0, min_count=5, epochs=60, workers=2, batch_words=1000 ) self.model_sanity(model, ranks=ranks) def test_cbow_hs_fromfile(self): model = word2vec.Word2Vec( sg=0, cbow_mean=1, alpha=0.1, window=2, hs=1, negative=0, min_count=5, epochs=60, workers=2, batch_words=1000 ) self.model_sanity(model, with_corpus_file=True) def test_cbow_neg(self, ranks=None): """Test CBOW w/ negative sampling""" model = word2vec.Word2Vec( sg=0, cbow_mean=1, alpha=0.05, window=5, hs=0, negative=15, min_count=5, epochs=10, workers=2, sample=0 ) self.model_sanity(model, ranks=ranks) def test_cbow_neg_fromfile(self): model = word2vec.Word2Vec( sg=0, cbow_mean=1, alpha=0.05, window=5, hs=0, negative=15, min_count=5, epochs=10, workers=2, sample=0 ) self.model_sanity(model, with_corpus_file=True) def test_sg_fixedwindowsize(self): """Test skipgram with fixed window size. 
Use NS.""" model = word2vec.Word2Vec( sg=1, window=5, shrink_windows=False, hs=0, negative=15, min_count=5, epochs=10, workers=2 ) self.model_sanity(model) def test_sg_fixedwindowsize_fromfile(self): """Test skipgram with fixed window size. Use HS and train from file.""" model = word2vec.Word2Vec( sg=1, window=5, shrink_windows=False, hs=1, negative=0, min_count=5, epochs=10, workers=2 ) self.model_sanity(model, with_corpus_file=True) def test_cbow_fixedwindowsize(self, ranks=None): """Test CBOW with fixed window size. Use HS.""" model = word2vec.Word2Vec( sg=0, cbow_mean=1, alpha=0.1, window=5, shrink_windows=False, hs=1, negative=0, min_count=5, epochs=10, workers=2 ) self.model_sanity(model, ranks=ranks) def test_cbow_fixedwindowsize_fromfile(self): """Test CBOW with fixed window size. Use NS and train from file.""" model = word2vec.Word2Vec( sg=0, cbow_mean=1, alpha=0.1, window=5, shrink_windows=False, hs=0, negative=15, min_count=5, epochs=10, workers=2 ) self.model_sanity(model, with_corpus_file=True) def test_cosmul(self): model = word2vec.Word2Vec(sentences, vector_size=2, min_count=1, hs=1, negative=0) sims = model.wv.most_similar_cosmul('graph', topn=10) # self.assertTrue(sims[0][0] == 'trees', sims) # most similar # test querying for "most similar" by vector graph_vector = model.wv.get_vector('graph', norm=True) sims2 = model.wv.most_similar_cosmul(positive=[graph_vector], topn=11) sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself self.assertEqual(sims, sims2) def test_training_cbow(self): """Test CBOW word2vec training.""" # to test training, make the corpus larger by repeating its sentences over and over # build vocabulary, don't train yet model = word2vec.Word2Vec(vector_size=2, min_count=1, sg=0, hs=1, negative=0) model.build_vocab(sentences) self.assertTrue(model.wv.vectors.shape == (len(model.wv), 2)) self.assertTrue(model.syn1.shape == (len(model.wv), 2)) model.train(sentences, total_examples=model.corpus_count, 
epochs=model.epochs) sims = model.wv.most_similar('graph', topn=10) # self.assertTrue(sims[0][0] == 'trees', sims) # most similar # test querying for "most similar" by vector graph_vector = model.wv.get_vector('graph', norm=True) sims2 = model.wv.most_similar(positive=[graph_vector], topn=11) sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself self.assertEqual(sims, sims2) # build vocab and train in one step; must be the same as above model2 = word2vec.Word2Vec(sentences, vector_size=2, min_count=1, sg=0, hs=1, negative=0) self.models_equal(model, model2) def test_training_sg_negative(self): """Test skip-gram (negative sampling) word2vec training.""" # to test training, make the corpus larger by repeating its sentences over and over # build vocabulary, don't train yet model = word2vec.Word2Vec(vector_size=2, min_count=1, sg=1, hs=0, negative=2) model.build_vocab(sentences) self.assertTrue(model.wv.vectors.shape == (len(model.wv), 2)) self.assertTrue(model.syn1neg.shape == (len(model.wv), 2)) model.train(sentences, total_examples=model.corpus_count, epochs=model.epochs) sims = model.wv.most_similar('graph', topn=10) # self.assertTrue(sims[0][0] == 'trees', sims) # most similar # test querying for "most similar" by vector graph_vector = model.wv.get_vector('graph', norm=True) sims2 = model.wv.most_similar(positive=[graph_vector], topn=11) sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself self.assertEqual(sims, sims2) # build vocab and train in one step; must be the same as above model2 = word2vec.Word2Vec(sentences, vector_size=2, min_count=1, sg=1, hs=0, negative=2) self.models_equal(model, model2) def test_training_cbow_negative(self): """Test CBOW (negative sampling) word2vec training.""" # to test training, make the corpus larger by repeating its sentences over and over # build vocabulary, don't train yet model = word2vec.Word2Vec(vector_size=2, min_count=1, sg=0, hs=0, negative=2) 
model.build_vocab(sentences) self.assertTrue(model.wv.vectors.shape == (len(model.wv), 2)) self.assertTrue(model.syn1neg.shape == (len(model.wv), 2)) model.train(sentences, total_examples=model.corpus_count, epochs=model.epochs) sims = model.wv.most_similar('graph', topn=10) # self.assertTrue(sims[0][0] == 'trees', sims) # most similar # test querying for "most similar" by vector graph_vector = model.wv.get_vector('graph', norm=True) sims2 = model.wv.most_similar(positive=[graph_vector], topn=11) sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself self.assertEqual(sims, sims2) # build vocab and train in one step; must be the same as above model2 = word2vec.Word2Vec(sentences, vector_size=2, min_count=1, sg=0, hs=0, negative=2) self.models_equal(model, model2) def test_similarities(self): """Test similarity and n_similarity methods.""" # The model is trained using CBOW model = word2vec.Word2Vec(vector_size=2, min_count=1, sg=0, hs=0, negative=2) model.build_vocab(sentences) model.train(sentences, total_examples=model.corpus_count, epochs=model.epochs) self.assertTrue(model.wv.n_similarity(['graph', 'trees'], ['trees', 'graph'])) self.assertTrue(model.wv.n_similarity(['graph'], ['trees']) == model.wv.similarity('graph', 'trees')) self.assertRaises(ZeroDivisionError, model.wv.n_similarity, ['graph', 'trees'], []) self.assertRaises(ZeroDivisionError, model.wv.n_similarity, [], ['graph', 'trees']) self.assertRaises(ZeroDivisionError, model.wv.n_similarity, [], []) def test_similar_by(self): """Test word2vec similar_by_word and similar_by_vector.""" model = word2vec.Word2Vec(sentences, vector_size=2, min_count=1, hs=1, negative=0) wordsims = model.wv.similar_by_word('graph', topn=10) wordsims2 = model.wv.most_similar(positive='graph', topn=10) vectorsims = model.wv.similar_by_vector(model.wv['graph'], topn=10) vectorsims2 = model.wv.most_similar([model.wv['graph']], topn=10) self.assertEqual(wordsims, wordsims2) self.assertEqual(vectorsims, 
vectorsims2) def test_parallel(self): """Test word2vec parallel training.""" corpus = utils.RepeatCorpus(LeeCorpus(), 10000) # repeats about 33 times for workers in [4, ]: # [4, 2] model = word2vec.Word2Vec(corpus, vector_size=16, min_count=(10 * 33), workers=workers) origin_word = 'israeli' expected_neighbor = 'palestinian' sims = model.wv.most_similar(origin_word, topn=len(model.wv)) # the exact vectors and therefore similarities may differ, due to different thread collisions/randomization # so let's test only for topN neighbor_rank = [word for word, sim in sims].index(expected_neighbor) self.assertLess(neighbor_rank, 6) def test_r_n_g(self): """Test word2vec results identical with identical RNG seed.""" model = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1) model2 = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1) self.models_equal(model, model2) def models_equal(self, model, model2): self.assertEqual(len(model.wv), len(model2.wv)) self.assertTrue(np.allclose(model.wv.vectors, model2.wv.vectors)) if model.hs: self.assertTrue(np.allclose(model.syn1, model2.syn1)) if model.negative: self.assertTrue(np.allclose(model.syn1neg, model2.syn1neg)) most_common_word_index = np.argsort(model.wv.expandos['count'])[-1] most_common_word = model.wv.index_to_key[most_common_word_index] self.assertTrue(np.allclose(model.wv[most_common_word], model2.wv[most_common_word])) def test_predict_output_word(self): '''Test word2vec predict_output_word method handling for negative sampling scheme''' # under normal circumstances model_with_neg = word2vec.Word2Vec(sentences, min_count=1) predictions_with_neg = model_with_neg.predict_output_word(['system', 'human'], topn=5) self.assertTrue(len(predictions_with_neg) == 5) # out-of-vobaculary scenario predictions_out_of_vocab = model_with_neg.predict_output_word(['some', 'random', 'words'], topn=5) self.assertEqual(predictions_out_of_vocab, None) # when required model parameters have been deleted tmpf = 
get_tmpfile('gensim_word2vec.tst') model_with_neg.wv.save_word2vec_format(tmpf, binary=True) kv_model_with_neg = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, binary=True) binary_model_with_neg = word2vec.Word2Vec() binary_model_with_neg.wv = kv_model_with_neg self.assertRaises(RuntimeError, binary_model_with_neg.predict_output_word, ['system', 'human']) # negative sampling scheme not used model_without_neg = word2vec.Word2Vec(sentences, min_count=1, hs=1, negative=0) self.assertRaises(RuntimeError, model_without_neg.predict_output_word, ['system', 'human']) # passing indices instead of words in context str_context = ['system', 'human'] mixed_context = [model_with_neg.wv.get_index(str_context[0]), str_context[1]] idx_context = [model_with_neg.wv.get_index(w) for w in str_context] prediction_from_str = model_with_neg.predict_output_word(str_context, topn=5) prediction_from_mixed = model_with_neg.predict_output_word(mixed_context, topn=5) prediction_from_idx = model_with_neg.predict_output_word(idx_context, topn=5) self.assertEqual(prediction_from_str, prediction_from_mixed) self.assertEqual(prediction_from_str, prediction_from_idx) def test_load_old_model(self): """Test loading an old word2vec model of indeterminate version""" model_file = 'word2vec_old' # which version?!? 
model = word2vec.Word2Vec.load(datapath(model_file)) self.assertTrue(model.wv.vectors.shape == (12, 100)) self.assertTrue(len(model.wv) == 12) self.assertTrue(len(model.wv.index_to_key) == 12) self.assertTrue(model.syn1neg.shape == (len(model.wv), model.wv.vector_size)) self.assertTrue(len(model.wv.vectors_lockf.shape) > 0) self.assertTrue(model.cum_table.shape == (12,)) self.onlineSanity(model, trained_model=True) def test_load_old_model_separates(self): """Test loading an old word2vec model of indeterminate version""" # Model stored in multiple files model_file = 'word2vec_old_sep' model = word2vec.Word2Vec.load(datapath(model_file)) self.assertTrue(model.wv.vectors.shape == (12, 100)) self.assertTrue(len(model.wv) == 12) self.assertTrue(len(model.wv.index_to_key) == 12) self.assertTrue(model.syn1neg.shape == (len(model.wv), model.wv.vector_size)) self.assertTrue(len(model.wv.vectors_lockf.shape) > 0) self.assertTrue(model.cum_table.shape == (12,)) self.onlineSanity(model, trained_model=True) def obsolete_test_load_old_models_pre_1_0(self): """Test loading pre-1.0 models""" # load really old model model_file = 'w2v-lee-v0.12.0' model = word2vec.Word2Vec.load(datapath(model_file)) self.onlineSanity(model, trained_model=True) old_versions = [ '0.12.0', '0.12.1', '0.12.2', '0.12.3', '0.12.4', '0.13.0', '0.13.1', '0.13.2', '0.13.3', '0.13.4', ] for old_version in old_versions: self._check_old_version(old_version) def test_load_old_models_1_x(self): """Test loading 1.x models""" old_versions = [ '1.0.0', '1.0.1', ] for old_version in old_versions: self._check_old_version(old_version) def test_load_old_models_2_x(self): """Test loading 2.x models""" old_versions = [ '2.0.0', '2.1.0', '2.2.0', '2.3.0', ] for old_version in old_versions: self._check_old_version(old_version) def test_load_old_models_3_x(self): """Test loading 3.x models""" # test for max_final_vocab for model saved in 3.3 model_file = 'word2vec_3.3' model = word2vec.Word2Vec.load(datapath(model_file)) 
self.assertEqual(model.max_final_vocab, None) self.assertEqual(model.max_final_vocab, None) old_versions = [ '3.0.0', '3.1.0', '3.2.0', '3.3.0', '3.4.0' ] for old_version in old_versions: self._check_old_version(old_version) def _check_old_version(self, old_version): logging.info("TESTING LOAD of %s Word2Vec MODEL", old_version) saved_models_dir = datapath('old_w2v_models/w2v_{}.mdl') model = word2vec.Word2Vec.load(saved_models_dir.format(old_version)) self.assertIsNone(model.corpus_total_words) self.assertTrue(len(model.wv) == 3) try: self.assertTrue(model.wv.vectors.shape == (3, 4)) except AttributeError as ae: print("WV") print(model.wv) print(dir(model.wv)) print(model.wv.syn0) raise ae # check if similarity search and online training works. self.assertTrue(len(model.wv.most_similar('sentence')) == 2) model.build_vocab(lee_corpus_list, update=True) model.train(lee_corpus_list, total_examples=model.corpus_count, epochs=model.epochs) # check if similarity search and online training works after saving and loading back the model. tmpf = get_tmpfile('gensim_word2vec.tst') model.save(tmpf) loaded_model = word2vec.Word2Vec.load(tmpf) loaded_model.build_vocab(lee_corpus_list, update=True) loaded_model.train(lee_corpus_list, total_examples=model.corpus_count, epochs=model.epochs) @log_capture() def test_build_vocab_warning(self, loglines): """Test if warning is raised on non-ideal input to a word2vec model""" sentences = ['human', 'machine'] model = word2vec.Word2Vec() model.build_vocab(sentences) warning = "Each 'sentences' item should be a list of words (usually unicode strings)." 
self.assertTrue(warning in str(loglines)) @log_capture() def test_train_warning(self, loglines): """Test if warning is raised if alpha rises during subsequent calls to train()""" sentences = [ ['human'], ['graph', 'trees'] ] model = word2vec.Word2Vec(min_count=1) model.build_vocab(sentences) for epoch in range(10): model.train(sentences, total_examples=model.corpus_count, epochs=model.epochs) model.alpha -= 0.002 model.min_alpha = model.alpha if epoch == 5: model.alpha += 0.05 warning = "Effective 'alpha' higher than previous training cycles" self.assertTrue(warning in str(loglines)) @log_capture() def test_train_hs_and_neg(self, loglines): """ Test if ValueError is raised when both hs=0 and negative=0 Test if warning is raised if both hs and negative are activated """ with self.assertRaises(ValueError): word2vec.Word2Vec(sentences, min_count=1, hs=0, negative=0) word2vec.Word2Vec(sentences, min_count=1, hs=1, negative=5) warning = "Both hierarchical softmax and negative sampling are activated." self.assertTrue(warning in str(loglines)) def test_train_with_explicit_param(self): model = word2vec.Word2Vec(vector_size=2, min_count=1, hs=1, negative=0) model.build_vocab(sentences) with self.assertRaises(ValueError): model.train(sentences, total_examples=model.corpus_count) with self.assertRaises(ValueError): model.train(sentences, epochs=model.epochs) with self.assertRaises(ValueError): model.train(sentences) def test_sentences_should_not_be_a_generator(self): """ Is sentences a generator object? """ gen = (s for s in sentences) self.assertRaises(TypeError, word2vec.Word2Vec, (gen,)) def test_load_on_class_error(self): """Test if exception is raised when loading word2vec model on instance""" self.assertRaises(AttributeError, load_on_instance) def test_file_should_not_be_compressed(self): """ Is corpus_file a compressed file? 
""" with tempfile.NamedTemporaryFile(suffix=".bz2") as fp: self.assertRaises(TypeError, word2vec.Word2Vec, (None, fp.name)) def test_reset_from(self): """Test if reset_from() uses pre-built structures from other model""" model = word2vec.Word2Vec(sentences, min_count=1) other_model = word2vec.Word2Vec(new_sentences, min_count=1) model.reset_from(other_model) self.assertEqual(model.wv.key_to_index, other_model.wv.key_to_index) def test_compute_training_loss(self): model = word2vec.Word2Vec(min_count=1, sg=1, negative=5, hs=1) model.build_vocab(sentences) model.train(sentences, compute_loss=True, total_examples=model.corpus_count, epochs=model.epochs) training_loss_val = model.get_latest_training_loss() self.assertTrue(training_loss_val > 0.0) def test_negative_ns_exp(self): """The model should accept a negative ns_exponent as a valid value.""" model = word2vec.Word2Vec(sentences, ns_exponent=-1, min_count=1, workers=1) tmpf = get_tmpfile('w2v_negative_exp.tst') model.save(tmpf) loaded_model = word2vec.Word2Vec.load(tmpf) loaded_model.train(sentences, total_examples=model.corpus_count, epochs=1) assert loaded_model.ns_exponent == -1, loaded_model.ns_exponent # endclass TestWord2VecModel
TestWord2VecModel
python
pandas-dev__pandas
pandas/tests/frame/indexing/test_take.py
{ "start": 46, "end": 3229 }
class ____: def test_take_slices_not_supported(self, float_frame): # GH#51539 df = float_frame slc = slice(0, 4, 1) with pytest.raises(TypeError, match="slice"): df.take(slc, axis=0) with pytest.raises(TypeError, match="slice"): df.take(slc, axis=1) def test_take(self, float_frame): # homogeneous order = [3, 1, 2, 0] for df in [float_frame]: result = df.take(order, axis=0) expected = df.reindex(df.index.take(order)) tm.assert_frame_equal(result, expected) # axis = 1 result = df.take(order, axis=1) expected = df.loc[:, ["D", "B", "C", "A"]] tm.assert_frame_equal(result, expected, check_names=False) # negative indices order = [2, 1, -1] for df in [float_frame]: result = df.take(order, axis=0) expected = df.reindex(df.index.take(order)) tm.assert_frame_equal(result, expected) result = df.take(order, axis=0) tm.assert_frame_equal(result, expected) # axis = 1 result = df.take(order, axis=1) expected = df.loc[:, ["C", "B", "D"]] tm.assert_frame_equal(result, expected, check_names=False) # illegal indices msg = "indices are out-of-bounds" with pytest.raises(IndexError, match=msg): df.take([3, 1, 2, 30], axis=0) with pytest.raises(IndexError, match=msg): df.take([3, 1, 2, -31], axis=0) with pytest.raises(IndexError, match=msg): df.take([3, 1, 2, 5], axis=1) with pytest.raises(IndexError, match=msg): df.take([3, 1, 2, -5], axis=1) def test_take_mixed_type(self, float_string_frame): # mixed-dtype order = [4, 1, 2, 0, 3] for df in [float_string_frame]: result = df.take(order, axis=0) expected = df.reindex(df.index.take(order)) tm.assert_frame_equal(result, expected) # axis = 1 result = df.take(order, axis=1) expected = df.loc[:, ["foo", "B", "C", "A", "D"]] tm.assert_frame_equal(result, expected) # negative indices order = [4, 1, -2] for df in [float_string_frame]: result = df.take(order, axis=0) expected = df.reindex(df.index.take(order)) tm.assert_frame_equal(result, expected) # axis = 1 result = df.take(order, axis=1) expected = df.loc[:, ["foo", "B", "D"]] 
tm.assert_frame_equal(result, expected) def test_take_mixed_numeric(self, mixed_float_frame, mixed_int_frame): # by dtype order = [1, 2, 0, 3] for df in [mixed_float_frame, mixed_int_frame]: result = df.take(order, axis=0) expected = df.reindex(df.index.take(order)) tm.assert_frame_equal(result, expected) # axis = 1 result = df.take(order, axis=1) expected = df.loc[:, ["B", "C", "A", "D"]] tm.assert_frame_equal(result, expected)
TestDataFrameTake
python
keras-team__keras
keras/src/layers/rnn/simple_rnn_test.py
{ "start": 130, "end": 9276 }
class ____(testing.TestCase): @pytest.mark.requires_trainable_backend def test_basics(self): self.run_layer_test( layers.SimpleRNN, init_kwargs={"units": 3, "dropout": 0.5, "recurrent_dropout": 0.5}, input_shape=(3, 2, 4), call_kwargs={"training": True}, expected_output_shape=(3, 3), expected_num_trainable_weights=3, expected_num_non_trainable_weights=0, expected_num_non_trainable_variables=1, supports_masking=True, ) self.run_layer_test( layers.SimpleRNN, init_kwargs={ "units": 3, "return_sequences": True, "bias_regularizer": "l1", "kernel_regularizer": "l2", "recurrent_regularizer": "l2", }, input_shape=(3, 2, 4), expected_output_shape=(3, 2, 3), expected_num_losses=3, expected_num_trainable_weights=3, expected_num_non_trainable_weights=0, supports_masking=True, ) def test_correctness(self): sequence = np.arange(24).reshape((2, 3, 4)).astype("float32") layer = layers.SimpleRNN( 4, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), ) output = layer(sequence) self.assertAllClose( np.array( [ [0.405432, 0.405432, 0.405432, 0.405432], [0.73605347, 0.73605347, 0.73605347, 0.73605347], ] ), output, ) layer = layers.SimpleRNN( 4, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), unroll=True, ) output = layer(sequence) self.assertAllClose( np.array( [ [0.405432, 0.405432, 0.405432, 0.405432], [0.73605347, 0.73605347, 0.73605347, 0.73605347], ] ), output, ) layer = layers.SimpleRNN( 4, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), go_backwards=True, ) output = layer(sequence) self.assertAllClose( np.array( [ [0.11144729, 0.11144729, 0.11144729, 0.11144729], [0.5528889, 0.5528889, 0.5528889, 0.5528889], ] ), output, ) layer = layers.SimpleRNN( 4, 
kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), go_backwards=True, unroll=True, ) output = layer(sequence) self.assertAllClose( np.array( [ [0.11144729, 0.11144729, 0.11144729, 0.11144729], [0.5528889, 0.5528889, 0.5528889, 0.5528889], ] ), output, ) def test_statefulness(self): sequence = np.arange(24).reshape((2, 3, 4)).astype("float32") layer = layers.SimpleRNN( 4, stateful=True, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), ) layer(sequence) output = layer(sequence) self.assertAllClose( np.array( [ [0.40559256, 0.40559256, 0.40559256, 0.40559256], [0.7361247, 0.7361247, 0.7361247, 0.7361247], ] ), output, ) layer.reset_state() layer(sequence) output = layer(sequence) self.assertAllClose( np.array( [ [0.40559256, 0.40559256, 0.40559256, 0.40559256], [0.7361247, 0.7361247, 0.7361247, 0.7361247], ] ), output, ) def test_pass_initial_state(self): sequence = np.arange(24).reshape((2, 4, 3)).astype("float32") initial_state = np.arange(8).reshape((2, 4)).astype("float32") layer = layers.SimpleRNN( 4, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), ) output = layer(sequence, initial_state=initial_state) self.assertAllClose( np.array( [ [0.33621645, 0.33621645, 0.33621645, 0.33621645], [0.6262637, 0.6262637, 0.6262637, 0.6262637], ] ), output, ) layer = layers.SimpleRNN( 4, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), go_backwards=True, ) output = layer(sequence, initial_state=initial_state) self.assertAllClose( np.array( [ [0.07344437, 0.07344437, 0.07344437, 0.07344437], [0.43043602, 0.43043602, 0.43043602, 0.43043602], ] ), output, ) def test_masking(self): sequence = 
np.arange(24).reshape((2, 4, 3)).astype("float32") mask = np.array([[True, True, False, True], [True, False, False, True]]) layer = layers.SimpleRNN( 4, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), unroll=True, ) output = layer(sequence, mask=mask) self.assertAllClose( np.array( [ [0.32951632, 0.32951632, 0.32951632, 0.32951632], [0.61799484, 0.61799484, 0.61799484, 0.61799484], ] ), output, ) layer = layers.SimpleRNN( 2, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), return_sequences=True, ) output = layer(sequence, mask=mask) self.assertAllClose( np.array( [ [0.0599281, 0.0599281], [0.15122814, 0.15122814], [0.15122814, 0.15122814], [0.32394567, 0.32394567], ], ), output[0], ) self.assertAllClose( np.array( [ [0.3969304, 0.3969304], [0.3969304, 0.3969304], [0.3969304, 0.3969304], [0.608085, 0.608085], ], ), output[1], ) layer = layers.SimpleRNN( 2, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), return_sequences=True, zero_output_for_mask=True, ) output = layer(sequence, mask=mask) self.assertAllClose( np.array( [ [0.0599281, 0.0599281], [0.15122814, 0.15122814], [0.0, 0.0], [0.32394567, 0.32394567], ], ), output[0], ) self.assertAllClose( np.array( [ [0.3969304, 0.3969304], [0.0, 0.0], [0.0, 0.0], [0.608085, 0.608085], ], ), output[1], ) layer = layers.SimpleRNN( 4, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), go_backwards=True, ) output = layer(sequence, mask=mask) self.assertAllClose( np.array( [ [0.07376196, 0.07376196, 0.07376196, 0.07376196], [0.43645123, 0.43645123, 0.43645123, 0.43645123], ] ), output, )
SimpleRNNTest
python
scipy__scipy
scipy/linalg/tests/test_basic.py
{ "start": 83698, "end": 86145 }
class ____: def test_matrix_norms(self): # Not all of these are matrix norms in the most technical sense. rng = np.random.default_rng(1234) for n, m in (1, 1), (1, 3), (3, 1), (4, 4), (4, 5), (5, 4): for t in np.float32, np.float64, np.complex64, np.complex128, np.int64: A = 10 * rng.standard_normal((n, m)).astype(t) if np.issubdtype(A.dtype, np.complexfloating): A += 10j * rng.standard_normal((n, m)) t_high = np.complex128 else: t_high = np.float64 for order in (None, 'fro', 1, -1, 2, -2, np.inf, -np.inf): actual = norm(A, ord=order) desired = np.linalg.norm(A, ord=order) # SciPy may return higher precision matrix norms. # This is a consequence of using LAPACK. if not np.allclose(actual, desired): desired = np.linalg.norm(A.astype(t_high), ord=order) assert_allclose(actual, desired) def test_axis_kwd(self): a = np.array([[[2, 1], [3, 4]]] * 2, 'd') b = norm(a, ord=np.inf, axis=(1, 0)) c = norm(np.swapaxes(a, 0, 1), ord=np.inf, axis=(0, 1)) d = norm(a, ord=1, axis=(0, 1)) assert_allclose(b, c) assert_allclose(c, d) assert_allclose(b, d) assert_(b.shape == c.shape == d.shape) b = norm(a, ord=1, axis=(1, 0)) c = norm(np.swapaxes(a, 0, 1), ord=1, axis=(0, 1)) d = norm(a, ord=np.inf, axis=(0, 1)) assert_allclose(b, c) assert_allclose(c, d) assert_allclose(b, d) assert_(b.shape == c.shape == d.shape) def test_keepdims_kwd(self): a = np.arange(120, dtype='d').reshape(2, 3, 4, 5) b = norm(a, ord=np.inf, axis=(1, 0), keepdims=True) c = norm(a, ord=1, axis=(0, 1), keepdims=True) assert_allclose(b, c) assert_(b.shape == c.shape) def test_empty(self): a = np.empty((0, 0)) assert_allclose(norm(a), 0.) assert_allclose(norm(a, axis=0), np.zeros((0,))) assert_allclose(norm(a, keepdims=True), np.zeros((1, 1))) a = np.empty((0, 3)) assert_allclose(norm(a), 0.) assert_allclose(norm(a, axis=0), np.zeros((3,))) assert_allclose(norm(a, keepdims=True), np.zeros((1, 1)))
TestMatrixNorms
python
spack__spack
lib/spack/spack/util/unparse/unparser.py
{ "start": 1005, "end": 2634 }
class ____(object): """ A node visitor base class that walks the abstract syntax tree and calls a visitor function for every node found. This function may return a value which is forwarded by the `visit` method. This class is meant to be subclassed, with the subclass adding visitor methods. Per default the visitor functions for the nodes are ``'visit_'`` + class name of the node. So a `TryFinally` node visit function would be `visit_TryFinally`. This behavior can be changed by overriding the `visit` method. If no visitor function exists for a node (return value `None`) the `generic_visit` visitor is used instead. Don't use the `NodeVisitor` if you want to apply changes to nodes during traversing. For this a special visitor exists (`NodeTransformer`) that allows modifications. """ def visit(self, node): """Visit a node.""" method = "visit_" + node.__class__.__name__ visitor = getattr(self, method, self.generic_visit) return visitor(node) def generic_visit(self, node): """Called if no explicit visitor function exists for a node.""" for field, value in iter_fields(node): if isinstance(value, list): for item in value: if isinstance(item, AST): self.visit(item) elif isinstance(value, AST): self.visit(value) # Large float and imaginary literals get turned into infinities in the AST. # We unparse those infinities to INFSTR. _INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1)
NodeVisitor
python
altair-viz__altair
altair/vegalite/v6/schema/_config.py
{ "start": 228998, "end": 249281 }
class ____(TypedDict, total=False): """ :class:`altair.RectConfig` ``TypedDict`` wrapper. Parameters ---------- align The horizontal alignment of the text or ranged marks (area, bar, image, rect, rule). One of ``"left"``, ``"right"``, ``"center"``. **Note:** Expression reference is *not* supported for range marks. angle The rotation angle of the text, in degrees. aria A boolean flag indicating if `ARIA attributes <https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ should be included (SVG output only). If ``false``, the "aria-hidden" attribute will be set on the output SVG element, removing the mark item from the ARIA accessibility tree. ariaRole Sets the type of user interface element of the mark item for `ARIA accessibility <https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output only). If specified, this property determines the "role" attribute. Warning: this property is experimental and may be changed in the future. ariaRoleDescription A human-readable, author-localized description for the role of the mark item for `ARIA accessibility <https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output only). If specified, this property determines the "aria-roledescription" attribute. Warning: this property is experimental and may be changed in the future. aspect Whether to keep aspect ratio of image marks. baseline For text marks, the vertical text baseline. One of ``"alphabetic"`` (default), ``"top"``, ``"middle"``, ``"bottom"``, ``"line-top"``, ``"line-bottom"``, or an expression reference that provides one of the valid values. The ``"line-top"`` and ``"line-bottom"`` values operate similarly to ``"top"`` and ``"bottom"``, but are calculated relative to the ``lineHeight`` rather than ``fontSize`` alone. For range marks, the vertical alignment of the marks. One of ``"top"``, ``"middle"``, ``"bottom"``. **Note:** Expression reference is *not* supported for range marks. 
binSpacing Offset between bars for binned field. The ideal value for this is either 0 (preferred by statisticians) or 1 (Vega-Lite default, D3 example style). **Default value:** ``1`` blend The color blend mode for drawing an item on its current background. Any valid `CSS mix-blend-mode <https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode>`__ value can be used. **Default value:** ``"source-over"`` color Default color. **Default value:** ``"#4682b4"`` **Note:** * This property cannot be used in a `style config <https://vega.github.io/vega-lite/docs/mark.html#style-config>`__. * The ``fill`` and ``stroke`` properties have higher precedence than ``color`` and will override ``color``. continuousBandSize The default size of the bars on continuous scales. **Default value:** ``5`` cornerRadius The radius in pixels of rounded rectangles or arcs' corners. **Default value:** ``0`` cornerRadiusBottomLeft The radius in pixels of rounded rectangles' bottom left corner. **Default value:** ``0`` cornerRadiusBottomRight The radius in pixels of rounded rectangles' bottom right corner. **Default value:** ``0`` cornerRadiusTopLeft The radius in pixels of rounded rectangles' top right corner. **Default value:** ``0`` cornerRadiusTopRight The radius in pixels of rounded rectangles' top left corner. **Default value:** ``0`` cursor The mouse cursor used over the mark. Any valid `CSS cursor type <https://developer.mozilla.org/en-US/docs/Web/CSS/cursor#Values>`__ can be used. description A text description of the mark item for `ARIA accessibility <https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output only). If specified, this property determines the `"aria-label" attribute <https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA/ARIA_Techniques/Using_the_aria-label_attribute>`__. dir The direction of the text. One of ``"ltr"`` (left-to-right) or ``"rtl"`` (right-to-left). 
This property determines on which side is truncated in response to the limit parameter. **Default value:** ``"ltr"`` discreteBandSize The default size of the bars with discrete dimensions. If unspecified, the default size is ``step-2``, which provides 2 pixel offset between bars. dx The horizontal offset, in pixels, between the text label and its anchor point. The offset is applied after rotation by the *angle* property. dy The vertical offset, in pixels, between the text label and its anchor point. The offset is applied after rotation by the *angle* property. ellipsis The ellipsis string for text truncated in response to the limit parameter. **Default value:** ``"…"`` endAngle The end angle in radians for arc marks. A value of ``0`` indicates up (north), increasing values proceed clockwise. fill Default fill color. This property has higher precedence than ``config.color``. Set to ``null`` to remove fill. **Default value:** (None) fillOpacity The fill opacity (value between [0,1]). **Default value:** ``1`` filled Whether the mark's color should be used as fill color instead of stroke color. **Default value:** ``false`` for all ``point``, ``line``, and ``rule`` marks as well as ``geoshape`` marks for `graticule <https://vega.github.io/vega-lite/docs/data.html#graticule>`__ data sources; otherwise, ``true``. **Note:** This property cannot be used in a `style config <https://vega.github.io/vega-lite/docs/mark.html#style-config>`__. font The typeface to set the text in (e.g., ``"Helvetica Neue"``). fontSize The font size, in pixels. **Default value:** ``11`` fontStyle The font style (e.g., ``"italic"``). fontWeight The font weight. This can be either a string (e.g ``"bold"``, ``"normal"``) or a number (``100``, ``200``, ``300``, ..., ``900`` where ``"normal"`` = ``400`` and ``"bold"`` = ``700``). height Height of the marks. href A URL to load upon mouse click. If defined, the mark acts as a hyperlink. innerRadius The inner radius in pixels of arc marks. 
``innerRadius`` is an alias for ``radius2``. **Default value:** ``0`` interpolate The line interpolation method to use for line and area marks. One of the following: * ``"linear"``: piecewise linear segments, as in a polyline. * ``"linear-closed"``: close the linear segments to form a polygon. * ``"step"``: alternate between horizontal and vertical segments, as in a step function. * ``"step-before"``: alternate between vertical and horizontal segments, as in a step function. * ``"step-after"``: alternate between horizontal and vertical segments, as in a step function. * ``"basis"``: a B-spline, with control point duplication on the ends. * ``"basis-open"``: an open B-spline; may not intersect the start or end. * ``"basis-closed"``: a closed B-spline, as in a loop. * ``"cardinal"``: a Cardinal spline, with control point duplication on the ends. * ``"cardinal-open"``: an open Cardinal spline; may not intersect the start or end, but will intersect other control points. * ``"cardinal-closed"``: a closed Cardinal spline, as in a loop. * ``"bundle"``: equivalent to basis, except the tension parameter is used to straighten the spline. * ``"monotone"``: cubic interpolation that preserves monotonicity in y. invalid Invalid data mode, which defines how the marks and corresponding scales should represent invalid values (``null`` and ``NaN`` in continuous scales *without* defined output for invalid values). * ``"filter"`` — *Exclude* all invalid values from the visualization's *marks* and *scales*. For path marks (for line, area, trail), this option will create paths that connect valid points, as if the data rows with invalid values do not exist. * ``"break-paths-filter-domains"`` — Break path marks (for line, area, trail) at invalid values. For non-path marks, this is equivalent to ``"filter"``. All *scale* domains will *exclude* these filtered data points. * ``"break-paths-show-domains"`` — Break paths (for line, area, trail) at invalid values. 
Hide invalid values for non-path marks. All *scale* domains will *include* these filtered data points (for both path and non-path marks). * ``"show"`` or ``null`` — Show all data points in the marks and scale domains. Each scale will use the output for invalid values defined in ``config.scale.invalid`` or, if unspecified, by default invalid values will produce the same visual values as zero (if the scale includes zero) or the minimum value (if the scale does not include zero). * ``"break-paths-show-path-domains"`` (default) — This is equivalent to ``"break-paths-show-domains"`` for path-based marks (line/area/trail) and ``"filter"`` for non-path marks. **Note**: If any channel's scale has an output for invalid values defined in ``config.scale.invalid``, all values for the scales will be considered "valid" since they can produce a reasonable output for the scales. Thus, fields for such channels will not be filtered and will not cause path breaks. limit The maximum length of the text mark in pixels. The text value will be automatically truncated if the rendered size exceeds the limit. **Default value:** ``0`` -- indicating no limit lineBreak A delimiter, such as a newline character, upon which to break text strings into multiple lines. This property is ignored if the text is array-valued. lineHeight The line height in pixels (the spacing between subsequent lines of text) for multi-line text marks. minBandSize The minimum band size for bar and rectangle marks. **Default value:** ``0.25`` opacity The overall opacity (value between [0,1]). **Default value:** ``0.7`` for non-aggregate plots with ``point``, ``tick``, ``circle``, or ``square`` marks or layered ``bar`` charts and ``1`` otherwise. order For line and trail marks, this ``order`` property can be set to ``null`` or ``false`` to make the lines use the original order in the data sources. orient The orientation of a non-stacked bar, tick, area, and line charts. The value is either horizontal (default) or vertical. 
* For bar, rule and tick, this determines whether the size of the bar and tick should be applied to x or y dimension. * For area, this property determines the orient property of the Vega output. * For line and trail marks, this property determines the sort order of the points in the line if ``config.sortLineBy`` is not specified. For stacked charts, this is always determined by the orientation of the stack; therefore explicitly specified value will be ignored. outerRadius The outer radius in pixels of arc marks. ``outerRadius`` is an alias for ``radius``. **Default value:** ``0`` padAngle The angular padding applied to sides of the arc, in radians. radius For arc mark, the primary (outer) radius in pixels. For text marks, polar coordinate radial offset, in pixels, of the text from the origin determined by the ``x`` and ``y`` properties. **Default value:** ``min(plot_width, plot_height)/2`` radius2 The secondary (inner) radius in pixels of arc marks. **Default value:** ``0`` shape Shape of the point marks. Supported values include: * plotting shapes: ``"circle"``, ``"square"``, ``"cross"``, ``"diamond"``, ``"triangle-up"``, ``"triangle-down"``, ``"triangle-right"``, or ``"triangle-left"``. * the line symbol ``"stroke"`` * centered directional shapes ``"arrow"``, ``"wedge"``, or ``"triangle"`` * a custom `SVG path string <https://developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths>`__ (For correct sizing, custom shape paths should be defined within a square bounding box with coordinates ranging from -1 to 1 along both the x and y dimensions.) **Default value:** ``"circle"`` size Default size for marks. * For ``point``/``circle``/``square``, this represents the pixel area of the marks. Note that this value sets the area of the symbol; the side lengths will increase with the square root of this value. * For ``bar``, this represents the band size of the bar, in pixels. * For ``text``, this represents the font size, in pixels. 
**Default value:** * ``30`` for point, circle, square marks; width/height's ``step`` * ``2`` for bar marks with discrete dimensions; * ``5`` for bar marks with continuous dimensions; * ``11`` for text marks. smooth A boolean flag (default true) indicating if the image should be smoothed when resized. If false, individual pixels should be scaled directly rather than interpolated with smoothing. For SVG rendering, this option may not work in some browsers due to lack of standardization. startAngle The start angle in radians for arc marks. A value of ``0`` indicates up (north), increasing values proceed clockwise. stroke Default stroke color. This property has higher precedence than ``config.color``. Set to ``null`` to remove stroke. **Default value:** (None) strokeCap The stroke cap for line ending style. One of ``"butt"``, ``"round"``, or ``"square"``. **Default value:** ``"butt"`` strokeDash An array of alternating stroke, space lengths for creating dashed or dotted lines. strokeDashOffset The offset (in pixels) into which to begin drawing with the stroke dash array. strokeJoin The stroke line join method. One of ``"miter"``, ``"round"`` or ``"bevel"``. **Default value:** ``"miter"`` strokeMiterLimit The miter limit at which to bevel a line join. strokeOffset The offset in pixels at which to draw the group stroke and fill. If unspecified, the default behavior is to dynamically offset stroked groups such that 1 pixel stroke widths align with the pixel grid. strokeOpacity The stroke opacity (value between [0,1]). **Default value:** ``1`` strokeWidth The stroke width, in pixels. tension Depending on the interpolation type, sets the tension parameter (for line and area marks). text Placeholder text if the ``text`` channel is not specified theta * For arc marks, the arc length in radians if theta2 is not specified, otherwise the start arc angle. (A value of 0 indicates up or “north”, increasing values proceed clockwise.) 
* For text marks, polar coordinate angle in radians. theta2 The end angle of arc marks in radians. A value of 0 indicates up or “north”, increasing values proceed clockwise. time timeUnitBandPosition Default relative band position for a time unit. If set to ``0``, the marks will be positioned at the beginning of the time unit band step. If set to ``0.5``, the marks will be positioned in the middle of the time unit band step. timeUnitBandSize Default relative band size for a time unit. If set to ``1``, the bandwidth of the marks will be equal to the time unit band step. If set to ``0.5``, bandwidth of the marks will be half of the time unit band step. tooltip The tooltip text string to show upon mouse hover or an object defining which fields should the tooltip be derived from. * If ``tooltip`` is ``true`` or ``{"content": "encoding"}``, then all fields from ``encoding`` will be used. * If ``tooltip`` is ``{"content": "data"}``, then all fields that appear in the highlighted data point will be used. * If set to ``null`` or ``false``, then no tooltip will be used. See the `tooltip <https://vega.github.io/vega-lite/docs/tooltip.html>`__ documentation for a detailed discussion about tooltip in Vega-Lite. **Default value:** ``null`` url The URL of the image file for image marks. width Width of the marks. x X coordinates of the marks, or width of horizontal ``"bar"`` and ``"area"`` without specified ``x2`` or ``width``. The ``value`` of this channel can be a number or a string ``"width"`` for the width of the plot. x2 X2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``. The ``value`` of this channel can be a number or a string ``"width"`` for the width of the plot. y Y coordinates of the marks, or height of vertical ``"bar"`` and ``"area"`` without specified ``y2`` or ``height``. The ``value`` of this channel can be a number or a string ``"height"`` for the height of the plot. 
y2 Y2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``. The ``value`` of this channel can be a number or a string ``"height"`` for the height of the plot. """ align: Align_T angle: float aria: bool ariaRole: str ariaRoleDescription: str aspect: bool baseline: TextBaseline_T binSpacing: float blend: Blend_T color: ColorHex | LinearGradientKwds | RadialGradientKwds | ColorName_T continuousBandSize: float cornerRadius: float cornerRadiusBottomLeft: float cornerRadiusBottomRight: float cornerRadiusTopLeft: float cornerRadiusTopRight: float cursor: Cursor_T description: str dir: TextDirection_T discreteBandSize: float dx: float dy: float ellipsis: str endAngle: float fill: ColorHex | LinearGradientKwds | RadialGradientKwds | ColorName_T | None fillOpacity: float filled: bool font: str fontSize: float fontStyle: str fontWeight: FontWeight_T height: float href: str innerRadius: float interpolate: Interpolate_T invalid: MarkInvalidDataMode_T | None limit: float lineBreak: str lineHeight: float minBandSize: float opacity: float order: bool | None orient: Orientation_T outerRadius: float padAngle: float radius: float radius2: float shape: str size: float smooth: bool startAngle: float stroke: ColorHex | LinearGradientKwds | RadialGradientKwds | ColorName_T | None strokeCap: StrokeCap_T strokeDash: Sequence[float] strokeDashOffset: float strokeJoin: StrokeJoin_T strokeMiterLimit: float strokeOffset: float strokeOpacity: float strokeWidth: float tension: float text: str | Sequence[str] theta: float theta2: float time: float timeUnitBandPosition: float timeUnitBandSize: float tooltip: str | bool | float | TooltipContentKwds | None url: str width: float x: float | Literal["width"] x2: float | Literal["width"] y: float | Literal["height"] y2: float | Literal["height"]
RectConfigKwds
python
joke2k__faker
tests/providers/test_credit_card.py
{ "start": 6373, "end": 7852 }
class ____: mastercard_pattern: Pattern = re.compile( r"(?:5[1-5][0-9]{2}|222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}", ) visa_pattern: Pattern = re.compile(r"4[0-9]{12}([0-9]{3}){0,2}") maestro_pattern: Pattern = re.compile(r"(67)[0-9]{14}") prostir_pattern: Pattern = re.compile(r"(9)[0-9]{15}") def test_mastercard(self, faker, num_samples): for _ in range(num_samples): number = faker.credit_card_number("mastercard") assert self.mastercard_pattern.fullmatch(number) def test_visa(self, faker, num_samples): for _ in range(num_samples): number = faker.credit_card_number("visa") assert self.visa_pattern.fullmatch(number) def test_maestro(self, faker, num_samples): for _ in range(num_samples): number = faker.credit_card_number("maestro") assert self.maestro_pattern.fullmatch(number) def test_prostir(self, faker, num_samples): for _ in range(num_samples): number = faker.credit_card_number("prostir") assert self.prostir_pattern.fullmatch(number) def test_credit_card_full(self, faker, num_samples): for _ in range(num_samples): card_data = faker.credit_card_full("prostir").split("\n") assert re.match("[A-Za-z]+", card_data[1]) assert card_data[4] in UkUaBankProvider.banks assert card_data[0] == "ПРОСТІР"
TestUkUa
python
huggingface__transformers
src/transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py
{ "start": 19490, "end": 19562 }
class ____(Sam2VideoMaskEmbedding): pass
Sam3TrackerVideoMaskEmbedding
python
jina-ai__jina
jina/proto/docarray_v1/pb/jina_pb2_grpc.py
{ "start": 22665, "end": 23667 }
class ____(object): """* jina gRPC service to trigger a restore at the Executor Runtime. """ def restore(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_JinaExecutorRestoreServicer_to_server(servicer, server): rpc_method_handlers = { 'restore': grpc.unary_unary_rpc_method_handler( servicer.restore, request_deserializer=jina__pb2.RestoreSnapshotCommand.FromString, response_serializer=jina__pb2.RestoreSnapshotStatusProto.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'jina.JinaExecutorRestore', rpc_method_handlers ) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API.
JinaExecutorRestoreServicer
python
wandb__wandb
tests/unit_tests/test_asyncio_compat.py
{ "start": 516, "end": 1983 }
class ____: def __init__(self) -> None: self._before_exit = asyncio.Event() self._after_exit = asyncio.Event() def start( self, subtasks: list[Coroutine[Any, Any, Any]], main_task: Coroutine[Any, Any, Any] | None = None, ) -> None: """Start the tester. This schedules a parallel task that opens a task group, adds the given subtasks to it, and run the main task in the context manager body. """ async def run(): try: async with asyncio_compat.open_task_group() as task_group: for subtask in subtasks: task_group.start_soon(subtask) if main_task: await main_task self._before_exit.set() finally: self._after_exit.set() asyncio.create_task(run()) async def assert_blocked_on_exit(self) -> None: """Assert the tester's blocked waiting for the task group to exit.""" await _fail_after_timeout( self._before_exit.wait(), "Didn't reach end of task group context manager.", ) assert not self._after_exit.is_set() async def assert_exits(self) -> None: """Assert the tester has exited.""" await _fail_after_timeout( self._after_exit.wait(), "Didn't exit task group.", )
_TaskGroupTester
python
django__django
tests/prefetch_related/tests.py
{ "start": 68864, "end": 69763 }
class ____(TestCase): @classmethod def setUpTestData(cls): book1 = Book.objects.create(title="Poems") book2 = Book.objects.create(title="Jane Eyre") book3 = Book.objects.create(title="Wuthering Heights") book4 = Book.objects.create(title="Sense and Sensibility") author1 = Author2.objects.create(name="Charlotte", first_book=book1) author2 = Author2.objects.create(name="Anne", first_book=book1) author3 = Author2.objects.create(name="Emily", first_book=book1) author4 = Author2.objects.create(name="Jane", first_book=book4) author1.favorite_books.add(book1, book2, book3) author2.favorite_books.add(book1) author3.favorite_books.add(book2) author4.favorite_books.add(book3) def test_bug(self): list(Author2.objects.prefetch_related("first_book", "favorite_books"))
Ticket21410Tests
python
django__django
tests/migrate_signals/tests.py
{ "start": 479, "end": 751 }
class ____: def __init__(self, signal): self.call_counter = 0 self.call_args = None signal.connect(self, sender=APP_CONFIG) def __call__(self, signal, sender, **kwargs): self.call_counter += 1 self.call_args = kwargs
Receiver
python
coleifer__peewee
tests/models.py
{ "start": 158405, "end": 159145 }
class ____(ModelTestCase): requires = [SequenceModel] def test_create_table(self): query = SequenceModel._schema._create_table() self.assertSQL(query, ( 'CREATE TABLE IF NOT EXISTS "sequence_model" (' '"id" SERIAL NOT NULL PRIMARY KEY, ' '"seq_id" INTEGER NOT NULL DEFAULT NEXTVAL(\'seq_id_sequence\'), ' '"key" TEXT NOT NULL)'), []) def test_sequence(self): for key in ('k1', 'k2', 'k3'): SequenceModel.create(key=key) s1, s2, s3 = SequenceModel.select().order_by(SequenceModel.key) self.assertEqual(s1.seq_id, 1) self.assertEqual(s2.seq_id, 2) self.assertEqual(s3.seq_id, 3) @requires_postgresql
TestSequence
python
Pylons__pyramid
tests/test_view.py
{ "start": 40468, "end": 40579 }
class ____: scope = 'notaclass' module = sys.modules['tests'] codeinfo = 'codeinfo'
DummyVenusianInfo
python
getsentry__sentry
src/sentry/sentry_metrics/querying/units.py
{ "start": 3163, "end": 3592 }
class ____(UnitMetadata): """ Represents the unit metadata of a QueryExpression with a unit that need to be computed in the future. A future unit tells the unit normalization algorithm that it needs to apply the normalization to all downstream units once a unit in a formula has been determined. More details can be found in the UnitsNormalizationV2Visitor. """ pass @dataclass(frozen=True)
WithFutureUnit
python
python-openxml__python-docx
tests/test_shape.py
{ "start": 2283, "end": 4702 }
class ____: """Unit-test suite for `docx.shape.InlineShape` objects.""" @pytest.mark.parametrize( ("uri", "content_cxml", "expected_value"), [ # -- embedded picture -- (nsmap["pic"], "/pic:pic/pic:blipFill/a:blip{r:embed=rId1}", WD_INLINE_SHAPE.PICTURE), # -- linked picture -- ( nsmap["pic"], "/pic:pic/pic:blipFill/a:blip{r:link=rId2}", WD_INLINE_SHAPE.LINKED_PICTURE, ), # -- linked and embedded picture (not expected) -- ( nsmap["pic"], "/pic:pic/pic:blipFill/a:blip{r:embed=rId1,r:link=rId2}", WD_INLINE_SHAPE.LINKED_PICTURE, ), # -- chart -- (nsmap["c"], "", WD_INLINE_SHAPE.CHART), # -- SmartArt -- (nsmap["dgm"], "", WD_INLINE_SHAPE.SMART_ART), # -- something else we don't know about -- ("foobar", "", WD_INLINE_SHAPE.NOT_IMPLEMENTED), ], ) def it_knows_what_type_of_shape_it_is( self, uri: str, content_cxml: str, expected_value: WD_INLINE_SHAPE ): cxml = "wp:inline/a:graphic/a:graphicData{uri=%s}%s" % (uri, content_cxml) inline = cast(CT_Inline, element(cxml)) inline_shape = InlineShape(inline) assert inline_shape.type == expected_value def it_knows_its_display_dimensions(self): inline = cast(CT_Inline, element("wp:inline/wp:extent{cx=333, cy=666}")) inline_shape = InlineShape(inline) width, height = inline_shape.width, inline_shape.height assert isinstance(width, Length) assert width == 333 assert isinstance(height, Length) assert height == 666 def it_can_change_its_display_dimensions(self): inline_shape = InlineShape( cast( CT_Inline, element( "wp:inline/(wp:extent{cx=333,cy=666},a:graphic/a:graphicData/pic:pic/" "pic:spPr/a:xfrm/a:ext{cx=333,cy=666})" ), ) ) inline_shape.width = Emu(444) inline_shape.height = Emu(888) assert inline_shape._inline.xml == xml( "wp:inline/(wp:extent{cx=444,cy=888},a:graphic/a:graphicData/pic:pic/pic:spPr/" "a:xfrm/a:ext{cx=444,cy=888})" )
DescribeInlineShape
python
econchick__interrogate
src/interrogate/config.py
{ "start": 540, "end": 9195 }
class ____: """Configuration related to interrogating a given codebase. :param bool color: Highlight verbose output with color. :param str docstring_style: Style of docstrings to follow. Choices: "sphinx" (default), "google". :param fail_under: Fail when coverage % is less than a given amount. :type fail_under: `int` or `float` :param str ignore_regex: Regex identifying class, method, and function names to ignore. :param bool ignore_magic: Ignore all magic methods of classes. :param bool ignore_module: Ignore module-level docstrings. :param bool ignore_private: Ignore private classes, methods, and functions starting with two underscores. :param bool ignore_semiprivate: Ignore semiprivate classes, methods, and functions starting with a single underscore. :param bool ignore_init_method: Ignore ``__init__`` method of classes. :param bool ignore_nested_functions: Ignore nested functions and methods. :param bool ignore_init_module: Ignore ``__init__.py`` modules. :param str include_regex: Regex identifying class, method, and function names to include. :param bool omit_covered_files: Omit reporting files that have 100% documentation coverage. :param bool ignore_overloaded_functions: Ignore `@typing.overload`-decorated functions. 
""" VALID_STYLES = ("sphinx", "google") color: bool = attr.ib(default=False) docstring_style: str = attr.ib(default="sphinx") fail_under: float = attr.ib(default=80.0) ignore_regex: list[re.Pattern[str]] | None = attr.ib(default=None) ignore_magic: bool = attr.ib(default=False) ignore_module: bool = attr.ib(default=False) ignore_private: bool = attr.ib(default=False) ignore_semiprivate: bool = attr.ib(default=False) ignore_init_method: bool = attr.ib(default=False) ignore_init_module: bool = attr.ib(default=False) ignore_nested_classes: bool = attr.ib(default=False) ignore_nested_functions: bool = attr.ib(default=False) ignore_property_setters: bool = attr.ib(default=False) ignore_property_decorators: bool = attr.ib(default=False) ignore_overloaded_functions: bool = attr.ib(default=False) include_regex: list[re.Pattern[str]] | None = attr.ib(default=None) omit_covered_files: bool = attr.ib(default=False) @docstring_style.validator def _check_style(self, attribute: str, value: str) -> None: """Validate selected choice for docstring style""" if value not in self.VALID_STYLES: raise ValueError( f"Invalid `docstring_style` '{value}'. Valid values: " f"{', '.join(self.VALID_STYLES)}" ) def find_project_root(srcs: Sequence[str]) -> pathlib.Path: """Return a directory containing .git, .hg, or pyproject.toml. That directory can be one of the directories passed in `srcs` or their common parent. If no directory in the tree contains a marker that would specify it's the project root, the root of the file system is returned. """ if not srcs: return pathlib.Path("/").resolve() common_base = min(pathlib.Path(src).resolve() for src in srcs) if common_base.is_dir(): # Append a fake file so `parents` below returns `common_base_dir`, too. 
common_base /= "fake-file" for directory in common_base.parents: if (directory / ".git").exists(): return directory if (directory / ".hg").is_dir(): return directory if (directory / "pyproject.toml").is_file(): return directory return directory def find_project_config(path_search_start: Sequence[str]) -> str | None: """Find the absolute filepath to a pyproject.toml if it exists.""" project_root = find_project_root(path_search_start) pyproject_toml = project_root / "pyproject.toml" if pyproject_toml.is_file(): return str(pyproject_toml) setup_cfg = project_root / "setup.cfg" return str(setup_cfg) if setup_cfg.is_file() else None def parse_pyproject_toml(path_config: str) -> dict[str, Any]: """Parse ``pyproject.toml`` file and return relevant parts for Interrogate. :param str path_config: Path to ``pyproject.toml`` file. :return: Dictionary representing configuration for Interrogate. :rtype: dict :raise OSError: an I/O-related error when opening ``pyproject.toml``. :raise tomllib.TOMLDecodeError: unable to load ``pyproject.toml``. """ with open(path_config, "rb") as f: pyproject_toml = tomllib.load(f) config = pyproject_toml.get("tool", {}).get("interrogate", {}) return { k.replace("--", "").replace("-", "_"): v for k, v in config.items() } def sanitize_list_values(value: str) -> list[str | None]: """Parse a string of list items to a Python list. This is super hacky... :param str value: string-representation of a Python list :return: List of strings :rtype: list """ if not value: return [] if value.startswith("["): value = value[1:] if value.endswith("]"): value = value[:-1] if not value: return [] raw_values = [v.strip() for v in value.split(",")] return [v.strip('"') for v in raw_values] def parse_setup_cfg(path_config: str) -> dict[str, Any] | None: """Parse ``setup.cfg`` file and return relevant parts for Interrogate. This is super hacky... :param str path_config: Path to ``setup.cfg`` file. :return: Dictionary representing configuration for Interrogate. 
:rtype: dict :raise OSError: an I/O-related error when opening ``setup.cfg``. :raise configparser.ConfigParser: unable to load ``setup.cfg``. """ cfg = configparser.ConfigParser() cfg.read(path_config) try: interrogate_section = cfg["tool:interrogate"] except KeyError: return None keys_for_list_values = ["whitelist_regex", "ignore_regex", "exclude"] raw_config = dict(interrogate_section.items()) config = { k.replace("--", "").replace("-", "_"): v for k, v in raw_config.items() } for k, v in config.items(): if k in keys_for_list_values: config[k] = sanitize_list_values(v) # type: ignore elif v.lower() == "false": config[k] = False # type: ignore elif v.lower() == "true": config[k] = True # type: ignore return config def read_config_file( ctx: click.Context, param: click.Parameter, value: str | None ) -> str | None: """Inject config from ``pyproject.toml`` or ``setup.py`` into ``ctx``. These override option defaults, but still respect option values provided via the CLI. :param click.Context ctx: click command context. :param click.Parameter param: click command parameter (in this case, ``config`` from ``-c|--config``). :param str value: path to ``pyproject.toml`` or ``setup.cfg`` file. :return: path to ``pyproject.toml`` or ``setup.cfg`` file. :rtype: str :raise click.FileError: if ``pyproject.toml`` or ``setup.cfg`` is not parseable or otherwise not available (i.e. does not exist). 
""" if not value: paths = ctx.params.get("paths") if not paths: paths = (os.path.abspath(os.getcwd()),) value = find_project_config(paths) if value is None: return None config = None if value.endswith(".toml"): try: config = parse_pyproject_toml(value) except (tomllib.TOMLDecodeError, OSError) as e: raise click.FileError( filename=value, hint=f"Error reading configuration file: {e}", ) elif value.endswith(".cfg"): try: config = parse_setup_cfg(value) except configparser.ParsingError as e: raise click.FileError( filename=value, hint=f"Error reading configuration file: {e}", ) if not config: return None if ctx.default_map is None: ctx.default_map = {} # for backwards compatibility. before 1.1.3, only one regex was allowed. # with 1.1.3+, multiple regexes can be provided, but we want to honor # those that configured their pyproject.toml to be a single regex # string (since now we're expecting a list of strings). if "ignore_regex" in config: if isinstance(config["ignore_regex"], str): config["ignore_regex"] = [config["ignore_regex"]] ctx.default_map.update(config) return value
InterrogateConfig
python
giampaolo__psutil
psutil/_common.py
{ "start": 15951, "end": 25254 }
class ____: """Watches numbers so that they don't overflow and wrap (reset to zero). """ def __init__(self): self.lock = threading.Lock() self.cache = {} self.reminders = {} self.reminder_keys = {} def _add_dict(self, input_dict, name): assert name not in self.cache assert name not in self.reminders assert name not in self.reminder_keys self.cache[name] = input_dict self.reminders[name] = collections.defaultdict(int) self.reminder_keys[name] = collections.defaultdict(set) def _remove_dead_reminders(self, input_dict, name): """In case the number of keys changed between calls (e.g. a disk disappears) this removes the entry from self.reminders. """ old_dict = self.cache[name] gone_keys = set(old_dict.keys()) - set(input_dict.keys()) for gone_key in gone_keys: for remkey in self.reminder_keys[name][gone_key]: del self.reminders[name][remkey] del self.reminder_keys[name][gone_key] def run(self, input_dict, name): """Cache dict and sum numbers which overflow and wrap. Return an updated copy of `input_dict`. """ if name not in self.cache: # This was the first call. self._add_dict(input_dict, name) return input_dict self._remove_dead_reminders(input_dict, name) old_dict = self.cache[name] new_dict = {} for key in input_dict: input_tuple = input_dict[key] try: old_tuple = old_dict[key] except KeyError: # The input dict has a new key (e.g. a new disk or NIC) # which didn't exist in the previous call. new_dict[key] = input_tuple continue bits = [] for i in range(len(input_tuple)): input_value = input_tuple[i] old_value = old_tuple[i] remkey = (key, i) if input_value < old_value: # it wrapped! 
self.reminders[name][remkey] += old_value self.reminder_keys[name][key].add(remkey) bits.append(input_value + self.reminders[name][remkey]) new_dict[key] = tuple(bits) self.cache[name] = input_dict return new_dict def cache_clear(self, name=None): """Clear the internal cache, optionally only for function 'name'.""" with self.lock: if name is None: self.cache.clear() self.reminders.clear() self.reminder_keys.clear() else: self.cache.pop(name, None) self.reminders.pop(name, None) self.reminder_keys.pop(name, None) def cache_info(self): """Return internal cache dicts as a tuple of 3 elements.""" with self.lock: return (self.cache, self.reminders, self.reminder_keys) def wrap_numbers(input_dict, name): """Given an `input_dict` and a function `name`, adjust the numbers which "wrap" (restart from zero) across different calls by adding "old value" to "new value" and return an updated dict. """ with _wn.lock: return _wn.run(input_dict, name) _wn = _WrapNumbers() wrap_numbers.cache_clear = _wn.cache_clear wrap_numbers.cache_info = _wn.cache_info # The read buffer size for open() builtin. This (also) dictates how # much data we read(2) when iterating over file lines as in: # >>> with open(file) as f: # ... for line in f: # ... ... # Default per-line buffer size for binary files is 1K. For text files # is 8K. We use a bigger buffer (32K) in order to have more consistent # results when reading /proc pseudo files on Linux, see: # https://github.com/giampaolo/psutil/issues/2050 # https://github.com/giampaolo/psutil/issues/708 FILE_READ_BUFFER_SIZE = 32 * 1024 def open_binary(fname): return open(fname, "rb", buffering=FILE_READ_BUFFER_SIZE) def open_text(fname): """Open a file in text mode by using the proper FS encoding and en/decoding error handlers. 
""" # See: # https://github.com/giampaolo/psutil/issues/675 # https://github.com/giampaolo/psutil/pull/733 fobj = open( # noqa: SIM115 fname, buffering=FILE_READ_BUFFER_SIZE, encoding=ENCODING, errors=ENCODING_ERRS, ) try: # Dictates per-line read(2) buffer size. Defaults is 8k. See: # https://github.com/giampaolo/psutil/issues/2050#issuecomment-1013387546 fobj._CHUNK_SIZE = FILE_READ_BUFFER_SIZE except AttributeError: pass except Exception: fobj.close() raise return fobj def cat(fname, fallback=_DEFAULT, _open=open_text): """Read entire file content and return it as a string. File is opened in text mode. If specified, `fallback` is the value returned in case of error, either if the file does not exist or it can't be read(). """ if fallback is _DEFAULT: with _open(fname) as f: return f.read() else: try: with _open(fname) as f: return f.read() except OSError: return fallback def bcat(fname, fallback=_DEFAULT): """Same as above but opens file in binary mode.""" return cat(fname, fallback=fallback, _open=open_binary) def bytes2human(n, format="%(value).1f%(symbol)s"): """Used by various scripts. See: https://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/?in=user-4178764. 
>>> bytes2human(10000) '9.8K' >>> bytes2human(100001221) '95.4M' """ symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') prefix = {} for i, s in enumerate(symbols[1:]): prefix[s] = 1 << (i + 1) * 10 for symbol in reversed(symbols[1:]): if abs(n) >= prefix[symbol]: value = float(n) / prefix[symbol] return format % locals() return format % dict(symbol=symbols[0], value=n) def get_procfs_path(): """Return updated psutil.PROCFS_PATH constant.""" return sys.modules['psutil'].PROCFS_PATH def decode(s): return s.decode(encoding=ENCODING, errors=ENCODING_ERRS) # ===================================================================== # --- shell utils # ===================================================================== @memoize def term_supports_colors(file=sys.stdout): # pragma: no cover if not hasattr(file, "isatty") or not file.isatty(): return False try: file.fileno() except Exception: # noqa: BLE001 return False return True def hilite(s, color=None, bold=False): # pragma: no cover """Return an highlighted version of 'string'.""" if not term_supports_colors(): return s attr = [] colors = dict( blue='34', brown='33', darkgrey='30', green='32', grey='37', lightblue='36', red='91', violet='35', yellow='93', ) colors[None] = '29' try: color = colors[color] except KeyError: msg = f"invalid color {color!r}; choose amongst {list(colors.keys())}" raise ValueError(msg) from None attr.append(color) if bold: attr.append('1') return f"\x1b[{';'.join(attr)}m{s}\x1b[0m" def print_color( s, color=None, bold=False, file=sys.stdout ): # pragma: no cover """Print a colorized version of string.""" if not term_supports_colors(): print(s, file=file) elif POSIX: print(hilite(s, color, bold), file=file) else: import ctypes DEFAULT_COLOR = 7 GetStdHandle = ctypes.windll.Kernel32.GetStdHandle SetConsoleTextAttribute = ( ctypes.windll.Kernel32.SetConsoleTextAttribute ) colors = dict(green=2, red=4, brown=6, yellow=6) colors[None] = DEFAULT_COLOR try: color = colors[color] except KeyError: 
msg = ( f"invalid color {color!r}; choose between" f" {list(colors.keys())!r}" ) raise ValueError(msg) from None if bold and color <= 7: color += 8 handle_id = -12 if file is sys.stderr else -11 GetStdHandle.restype = ctypes.c_ulong handle = GetStdHandle(handle_id) SetConsoleTextAttribute(handle, color) try: print(s, file=file) finally: SetConsoleTextAttribute(handle, DEFAULT_COLOR) def debug(msg): """If PSUTIL_DEBUG env var is set, print a debug message to stderr.""" if PSUTIL_DEBUG: import inspect fname, lineno, _, _lines, _index = inspect.getframeinfo( inspect.currentframe().f_back ) if isinstance(msg, Exception): if isinstance(msg, OSError): # ...because str(exc) may contain info about the file name msg = f"ignoring {msg}" else: msg = f"ignoring {msg!r}" print( # noqa: T201 f"psutil-debug [{fname}:{lineno}]> {msg}", file=sys.stderr )
_WrapNumbers
python
allegroai__clearml
clearml/backend_api/services/v2_9/tasks.py
{ "start": 134083, "end": 135289 }
class ____(Response): """ Response of tasks.delete_hyper_params endpoint. :param deleted: Indicates if the task was updated successfully :type deleted: int """ _service = "tasks" _action = "delete_hyper_params" _version = "2.9" _schema = { "definitions": {}, "properties": { "deleted": { "description": "Indicates if the task was updated successfully", "type": ["integer", "null"], } }, "type": "object", } def __init__(self, deleted: Optional[int] = None, **kwargs: Any) -> None: super(DeleteHyperParamsResponse, self).__init__(**kwargs) self.deleted = deleted @schema_property("deleted") def deleted(self) -> Optional[int]: return self._property_deleted @deleted.setter def deleted(self, value: Optional[int]) -> None: if value is None: self._property_deleted = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "deleted", six.integer_types) self._property_deleted = value
DeleteHyperParamsResponse
python
python-pillow__Pillow
src/PIL/ImageCms.py
{ "start": 3726, "end": 3849 }
class ____(IntEnum): PERCEPTUAL = 0 RELATIVE_COLORIMETRIC = 1 SATURATION = 2 ABSOLUTE_COLORIMETRIC = 3
Intent
python
ray-project__ray
python/ray/serve/_private/common.py
{ "start": 27800, "end": 29491 }
class ____: """Sent from the HTTP proxy to replicas on the streaming codepath.""" def __init__( self, asgi_scope: Scope, *, proxy_actor_name: Optional[str] = None, receive_asgi_messages: Optional[ Callable[[RequestMetadata], Awaitable[bytes]] ] = None, ): self._asgi_scope: Scope = asgi_scope if proxy_actor_name is None and receive_asgi_messages is None: raise ValueError( "Either proxy_actor_name or receive_asgi_messages must be provided." ) # If receive_asgi_messages is passed, it'll be called directly. # If proxy_actor_name is passed, the actor will be fetched and its # `receive_asgi_messages` method will be called. self._proxy_actor_name: Optional[str] = proxy_actor_name # Need to keep the actor handle cached to avoid "lost reference to actor" error. self._cached_proxy_actor: Optional[ActorHandle] = None self._receive_asgi_messages: Optional[ Callable[[RequestMetadata], Awaitable[bytes]] ] = receive_asgi_messages @property def asgi_scope(self) -> Scope: return self._asgi_scope @property def receive_asgi_messages(self) -> Callable[[RequestMetadata], Awaitable[bytes]]: if self._receive_asgi_messages is None: self._cached_proxy_actor = ray.get_actor( self._proxy_actor_name, namespace=SERVE_NAMESPACE ) self._receive_asgi_messages = ( self._cached_proxy_actor.receive_asgi_messages.remote ) return self._receive_asgi_messages
StreamingHTTPRequest
python
mwaskom__seaborn
seaborn/relational.py
{ "start": 7244, "end": 14853 }
class ____(_RelationalPlotter): _legend_attributes = ["color", "linewidth", "marker", "dashes"] def __init__( self, *, data=None, variables={}, estimator=None, n_boot=None, seed=None, errorbar=None, sort=True, orient="x", err_style=None, err_kws=None, legend=None ): # TODO this is messy, we want the mapping to be agnostic about # the kind of plot to draw, but for the time being we need to set # this information so the SizeMapping can use it self._default_size_range = ( np.r_[.5, 2] * mpl.rcParams["lines.linewidth"] ) super().__init__(data=data, variables=variables) self.estimator = estimator self.errorbar = errorbar self.n_boot = n_boot self.seed = seed self.sort = sort self.orient = orient self.err_style = err_style self.err_kws = {} if err_kws is None else err_kws self.legend = legend def plot(self, ax, kws): """Draw the plot onto an axes, passing matplotlib kwargs.""" # Draw a test plot, using the passed in kwargs. The goal here is to # honor both (a) the current state of the plot cycler and (b) the # specified kwargs on all the lines we will draw, overriding when # relevant with the data semantics. Note that we won't cycle # internally; in other words, if `hue` is not used, all elements will # have the same color, but they will have the color that you would have # gotten from the corresponding matplotlib function, and calling the # function will advance the axes property cycle. 
kws = normalize_kwargs(kws, mpl.lines.Line2D) kws.setdefault("markeredgewidth", 0.75) kws.setdefault("markeredgecolor", "w") # Set default error kwargs err_kws = self.err_kws.copy() if self.err_style == "band": err_kws.setdefault("alpha", .2) elif self.err_style == "bars": pass elif self.err_style is not None: err = "`err_style` must be 'band' or 'bars', not {}" raise ValueError(err.format(self.err_style)) # Initialize the aggregation object weighted = "weight" in self.plot_data agg = (WeightedAggregator if weighted else EstimateAggregator)( self.estimator, self.errorbar, n_boot=self.n_boot, seed=self.seed, ) # TODO abstract variable to aggregate over here-ish. Better name? orient = self.orient if orient not in {"x", "y"}: err = f"`orient` must be either 'x' or 'y', not {orient!r}." raise ValueError(err) other = {"x": "y", "y": "x"}[orient] # TODO How to handle NA? We don't want NA to propagate through to the # estimate/CI when some values are present, but we would also like # matplotlib to show "gaps" in the line when all values are missing. # This is straightforward absent aggregation, but complicated with it. # If we want to use nas, we need to conditionalize dropna in iter_data. # Loop over the semantic subsets and add to the plot grouping_vars = "hue", "size", "style" for sub_vars, sub_data in self.iter_data(grouping_vars, from_comp_data=True): if self.sort: sort_vars = ["units", orient, other] sort_cols = [var for var in sort_vars if var in self.variables] sub_data = sub_data.sort_values(sort_cols) if ( self.estimator is not None and sub_data[orient].value_counts().max() > 1 ): if "units" in self.variables: # TODO eventually relax this constraint err = "estimator must be None when specifying units" raise ValueError(err) grouped = sub_data.groupby(orient, sort=self.sort) # Could pass as_index=False instead of reset_index, # but that fails on a corner case with older pandas. 
sub_data = ( grouped .apply(agg, other, **groupby_apply_include_groups(False)) .reset_index() ) else: sub_data[f"{other}min"] = np.nan sub_data[f"{other}max"] = np.nan # Apply inverse axis scaling for var in "xy": _, inv = _get_transform_functions(ax, var) for col in sub_data.filter(regex=f"^{var}"): sub_data[col] = inv(sub_data[col]) # --- Draw the main line(s) if "units" in self.variables: # XXX why not add to grouping variables? lines = [] for _, unit_data in sub_data.groupby("units"): lines.extend(ax.plot(unit_data["x"], unit_data["y"], **kws)) else: lines = ax.plot(sub_data["x"], sub_data["y"], **kws) for line in lines: if "hue" in sub_vars: line.set_color(self._hue_map(sub_vars["hue"])) if "size" in sub_vars: line.set_linewidth(self._size_map(sub_vars["size"])) if "style" in sub_vars: attributes = self._style_map(sub_vars["style"]) if "dashes" in attributes: line.set_dashes(attributes["dashes"]) if "marker" in attributes: line.set_marker(attributes["marker"]) line_color = line.get_color() line_alpha = line.get_alpha() line_capstyle = line.get_solid_capstyle() # --- Draw the confidence intervals if self.estimator is not None and self.errorbar is not None: # TODO handling of orientation will need to happen here if self.err_style == "band": func = {"x": ax.fill_between, "y": ax.fill_betweenx}[orient] func( sub_data[orient], sub_data[f"{other}min"], sub_data[f"{other}max"], color=line_color, **err_kws ) elif self.err_style == "bars": error_param = { f"{other}err": ( sub_data[other] - sub_data[f"{other}min"], sub_data[f"{other}max"] - sub_data[other], ) } ebars = ax.errorbar( sub_data["x"], sub_data["y"], **error_param, linestyle="", color=line_color, alpha=line_alpha, **err_kws ) # Set the capstyle properly on the error bars for obj in ebars.get_children(): if isinstance(obj, mpl.collections.LineCollection): obj.set_capstyle(line_capstyle) # Finalize the axes details self._add_axis_labels(ax) if self.legend: legend_artist = partial(mpl.lines.Line2D, xdata=[], 
ydata=[]) attrs = {"hue": "color", "size": "linewidth", "style": None} self.add_legend_data(ax, legend_artist, kws, attrs) handles, _ = ax.get_legend_handles_labels() if handles: legend = ax.legend(title=self.legend_title) adjust_legend_subtitles(legend)
_LinePlotter
python
huggingface__transformers
src/transformers/models/gptj/configuration_gptj.py
{ "start": 793, "end": 5502 }
class GPTJConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GPTJModel`]. It is used to instantiate a GPT-J
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the GPT-J
    [EleutherAI/gpt-j-6B](https://huggingface.co/EleutherAI/gpt-j-6B) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50400):
            Vocabulary size of the GPT-J model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`GPTJModel`].
        n_positions (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        n_embd (`int`, *optional*, defaults to 4096):
            Dimensionality of the embeddings and hidden states.
        n_layer (`int`, *optional*, defaults to 28):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        rotary_dim (`int`, *optional*, defaults to 64):
            Number of dimensions in the embedding that Rotary Position Embedding is applied to.
        n_inner (`int`, *optional*, defaults to None):
            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd.
        activation_function (`str`, *optional*, defaults to `"gelu_new"`):
            Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        bos_token_id (`int`, *optional*, defaults to 50256):
            Id of the beginning-of-stream token.
        eos_token_id (`int`, *optional*, defaults to 50256):
            Id of the end-of-stream token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the input and output word embeddings should be tied.

    Example:

    ```python
    >>> from transformers import GPTJModel, GPTJConfig

    >>> # Initializing a GPT-J 6B configuration
    >>> configuration = GPTJConfig()

    >>> # Initializing a model from the configuration
    >>> model = GPTJModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "gptj"
    # Canonical config attribute names -> GPT-J's historical names, so generic
    # code can read e.g. `config.hidden_size` and get `n_embd`.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        # The base class also stores bos/eos/tie_word_embeddings plus any
        # remaining kwargs (e.g. serialization metadata).
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


__all__ = ["GPTJConfig"]
GPTJConfig
python
mlflow__mlflow
mlflow/genai/judges/base.py
{ "start": 579, "end": 1178 }
class ____(ABC): """ Abstract base class for judge alignment optimizers. Alignment optimizers improve judge accuracy by learning from traces that contain human feedback. """ @abstractmethod def align(self, judge: Judge, traces: list[Trace]) -> Judge: """ Align a judge using the provided traces. Args: judge: The judge to be optimized traces: List of traces containing alignment data (feedback) Returns: A new Judge instance that is better aligned with the input traces. """
AlignmentOptimizer