language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
pytorch__pytorch
test/dynamo/cpython/3_13/typinganndata/ann_module695.py
{ "start": 65, "end": 140 }
class ____[T, *Ts, **P]: x: T y: tuple[*Ts] z: Callable[P, str]
A
python
apache__airflow
providers/edge3/src/airflow/providers/edge3/cli/dataclasses.py
{ "start": 1619, "end": 2116 }
class ____: """Status of the worker.""" job_count: int jobs: list state: EdgeWorkerState maintenance: bool maintenance_comments: str | None drain: bool @property def json(self) -> str: """Get the status as JSON.""" return json.dumps(asdict(self)) @staticmethod def from_json(json_str: str) -> WorkerStatus: """Create a WorkerStatus object from JSON.""" return WorkerStatus(**json.loads(json_str)) @dataclass
WorkerStatus
python
spack__spack
lib/spack/spack/vendor/pyrsistent/_pset.py
{ "start": 102, "end": 5706 }
class ____(object): """ Persistent set implementation. Built on top of the persistent map. The set supports all operations in the Set protocol and is Hashable. Do not instantiate directly, instead use the factory functions :py:func:`s` or :py:func:`pset` to create an instance. Random access and insert is log32(n) where n is the size of the set. Some examples: >>> s = pset([1, 2, 3, 1]) >>> s2 = s.add(4) >>> s3 = s2.remove(2) >>> s pset([1, 2, 3]) >>> s2 pset([1, 2, 3, 4]) >>> s3 pset([1, 3, 4]) """ __slots__ = ('_map', '__weakref__') def __new__(cls, m): self = super(PSet, cls).__new__(cls) self._map = m return self def __contains__(self, element): return element in self._map def __iter__(self): return iter(self._map) def __len__(self): return len(self._map) def __repr__(self): if not self: return 'p' + str(set(self)) return 'pset([{0}])'.format(str(set(self))[1:-1]) def __str__(self): return self.__repr__() def __hash__(self): return hash(self._map) def __reduce__(self): # Pickling support return pset, (list(self),) @classmethod def _from_iterable(cls, it, pre_size=8): return PSet(pmap(dict((k, True) for k in it), pre_size=pre_size)) def add(self, element): """ Return a new PSet with element added >>> s1 = s(1, 2) >>> s1.add(3) pset([1, 2, 3]) """ return self.evolver().add(element).persistent() def update(self, iterable): """ Return a new PSet with elements in iterable added >>> s1 = s(1, 2) >>> s1.update([3, 4, 4]) pset([1, 2, 3, 4]) """ e = self.evolver() for element in iterable: e.add(element) return e.persistent() def remove(self, element): """ Return a new PSet with element removed. Raises KeyError if element is not present. >>> s1 = s(1, 2) >>> s1.remove(2) pset([1]) """ if element in self._map: return self.evolver().remove(element).persistent() raise KeyError("Element '%s' not present in PSet" % repr(element)) def discard(self, element): """ Return a new PSet with element removed. Returns itself if element is not present. 
""" if element in self._map: return self.evolver().remove(element).persistent() return self class _Evolver(object): __slots__ = ('_original_pset', '_pmap_evolver') def __init__(self, original_pset): self._original_pset = original_pset self._pmap_evolver = original_pset._map.evolver() def add(self, element): self._pmap_evolver[element] = True return self def remove(self, element): del self._pmap_evolver[element] return self def is_dirty(self): return self._pmap_evolver.is_dirty() def persistent(self): if not self.is_dirty(): return self._original_pset return PSet(self._pmap_evolver.persistent()) def __len__(self): return len(self._pmap_evolver) def copy(self): return self def evolver(self): """ Create a new evolver for this pset. For a discussion on evolvers in general see the documentation for the pvector evolver. Create the evolver and perform various mutating updates to it: >>> s1 = s(1, 2, 3) >>> e = s1.evolver() >>> _ = e.add(4) >>> len(e) 4 >>> _ = e.remove(1) The underlying pset remains the same: >>> s1 pset([1, 2, 3]) The changes are kept in the evolver. An updated pmap can be created using the persistent() function on the evolver. >>> s2 = e.persistent() >>> s2 pset([2, 3, 4]) The new pset will share data with the original pset in the same way that would have been done if only using operations on the pset. """ return PSet._Evolver(self) # All the operations and comparisons you would expect on a set. # # This is not very beautiful. If we avoid inheriting from PSet we can use the # __slots__ concepts (which requires a new style class) and hopefully save some memory. 
__le__ = Set.__le__ __lt__ = Set.__lt__ __gt__ = Set.__gt__ __ge__ = Set.__ge__ __eq__ = Set.__eq__ __ne__ = Set.__ne__ __and__ = Set.__and__ __or__ = Set.__or__ __sub__ = Set.__sub__ __xor__ = Set.__xor__ issubset = __le__ issuperset = __ge__ union = __or__ intersection = __and__ difference = __sub__ symmetric_difference = __xor__ isdisjoint = Set.isdisjoint Set.register(PSet) Hashable.register(PSet) _EMPTY_PSET = PSet(pmap()) def pset(iterable=(), pre_size=8): """ Creates a persistent set from iterable. Optionally takes a sizing parameter equivalent to that used for :py:func:`pmap`. >>> s1 = pset([1, 2, 3, 2]) >>> s1 pset([1, 2, 3]) """ if not iterable: return _EMPTY_PSET return PSet._from_iterable(iterable, pre_size=pre_size) def s(*elements): """ Create a persistent set. Takes an arbitrary number of arguments to insert into the new set. >>> s1 = s(1, 2, 3, 2) >>> s1 pset([1, 2, 3]) """ return pset(elements)
PSet
python
keras-team__keras
keras/src/ops/nn.py
{ "start": 67068, "end": 70331 }
class ____(Operation): def __init__(self, axis, epsilon=1e-3, *, name=None): super().__init__(name=name) self.axis = axis self.epsilon = epsilon def call(self, x, mean, variance, offset=None, scale=None): return backend.nn.batch_normalization( x, mean, variance, axis=self.axis, offset=offset, scale=scale, epsilon=self.epsilon, ) def _check_shape(self, name, shape, expected_shape): if shape != expected_shape: raise ValueError( f"Arguments `{name}` must be a vector of length " f"`x.shape[axis]`. Expected: `{expected_shape}`. " f"Received: `{shape}." ) def compute_output_spec(self, x, mean, variance, offset, scale): shape = (x.shape[self.axis],) self._check_shape("mean", tuple(mean.shape), shape) self._check_shape("variance", tuple(variance.shape), shape) if offset is not None: self._check_shape("offset", tuple(offset.shape), shape) if offset is not scale: self._check_shape("scale", tuple(scale.shape), shape) return KerasTensor(x.shape, dtype=x.dtype) @keras_export( [ "keras.ops.batch_normalization", "keras.ops.nn.batch_normalization", ] ) def batch_normalization( x, mean, variance, axis, offset=None, scale=None, epsilon=1e-3 ): """Normalizes `x` by `mean` and `variance`. This op is typically used by the batch normalization step in a neural network. It normalizes the input tensor along the given axis. Args: x: Input tensor. mean: A mean vector of the same length as the `axis` dimension of the input thensor. variance: A variance vector of the same length as the `axis` dimension of the input tensor. axis: Integer, the axis that should be normalized. offset: An offset vector of the same length as the `axis` dimension of the input tensor. If not `None`, `offset` is added to the normalized tensor. Defaults to `None`. scale: A scale vector of the same length as the `axis` dimension of the input tensor. If not `None`, the normalized tensor is multiplied by `scale`. Defaults to `None`. epsilon: Small float added to variance to avoid dividing by zero. Defaults to 1e-3. 
Returns: The normalized tensor. Example: >>> x = keras.ops.convert_to_tensor( ... [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]] ... ) >>> keras.ops.batch_normalization( ... x, ... mean=[0.4, 0.5, 0.6], ... variance=[0.67, 0.67, 0.67], ... axis=-1 ... ) array([[-3.6624e-01, -3.6624e-01, -3.6624e-01], [-4.6445e-09, 0.0000e+00, -1.8578e-08], [ 3.6624e-01, 3.6624e-01, 3.6624e-01]]) """ if any_symbolic_tensors((x, mean, variance, offset, scale)): return BatchNorm(axis, epsilon).symbolic_call( x, mean, variance, offset, scale ) return backend.nn.batch_normalization( x, mean, variance, axis, offset, scale, epsilon )
BatchNorm
python
huggingface__transformers
examples/modular-transformers/modeling_new_task_model.py
{ "start": 1429, "end": 1986 }
class ____(BaseModelOutputWithPast): r""" image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. """ image_hidden_states: Optional[torch.FloatTensor] = None @dataclass @auto_docstring( custom_intro=""" Base class for NewTaskModel causal language model (or autoregressive) outputs. """ )
NewTaskModelModelOutputWithPast
python
getsentry__sentry
src/sentry/api/bases/organization.py
{ "start": 1825, "end": 3714 }
class ____(DemoSafePermission): scope_map = { "GET": ["org:read", "org:write", "org:admin"], "POST": ["org:write", "org:admin"], "PUT": ["org:write", "org:admin"], "DELETE": ["org:admin"], } def is_not_2fa_compliant( self, request: Request, organization: RpcOrganization | Organization ) -> bool: if not organization.flags.require_2fa: return False if request.user.is_authenticated and request.user.has_2fa(): return False if request.user.is_authenticated and request.user.is_sentry_app: return False if request.user.is_anonymous: return False if is_active_superuser(request): return False return True def needs_sso(self, request: Request, organization: Organization | RpcOrganization) -> bool: # XXX(dcramer): this is very similar to the server-rendered views # logic for checking valid SSO if not request.access.requires_sso: return False if not auth.has_completed_sso(request, organization.id): return True if not request.access.sso_is_valid: return True return False def has_object_permission( self, request: Request, view: APIView, organization: Organization | RpcOrganization | RpcUserOrganizationContext, ) -> bool: self.determine_access(request, organization) allowed_scopes = set(self.scope_map.get(request.method or "", [])) return any(request.access.has_scope(s) for s in allowed_scopes) def is_member_disabled_from_limit( self, request: Request, organization: Organization | RpcOrganization | RpcUserOrganizationContext, ) -> bool: return is_member_disabled_from_limit(request, organization)
OrganizationPermission
python
pyca__cryptography
src/cryptography/x509/name.py
{ "start": 12017, "end": 15363 }
class ____: _OID_RE = re.compile(r"(0|([1-9]\d*))(\.(0|([1-9]\d*)))+") _DESCR_RE = re.compile(r"[a-zA-Z][a-zA-Z\d-]*") _ESCAPE_SPECIAL = r"[\\ #=\"\+,;<>]" _ESCAPE_HEX = r"[\da-zA-Z]{2}" _PAIR = rf"\\({_ESCAPE_SPECIAL}|{_ESCAPE_HEX})" _PAIR_MULTI_RE = re.compile(rf"(\\{_ESCAPE_SPECIAL})|((\\{_ESCAPE_HEX})+)") _LUTF1 = r"[\x01-\x1f\x21\x24-\x2A\x2D-\x3A\x3D\x3F-\x5B\x5D-\x7F]" _SUTF1 = r"[\x01-\x21\x23-\x2A\x2D-\x3A\x3D\x3F-\x5B\x5D-\x7F]" _TUTF1 = r"[\x01-\x1F\x21\x23-\x2A\x2D-\x3A\x3D\x3F-\x5B\x5D-\x7F]" _UTFMB = rf"[\x80-{chr(sys.maxunicode)}]" _LEADCHAR = rf"{_LUTF1}|{_UTFMB}" _STRINGCHAR = rf"{_SUTF1}|{_UTFMB}" _TRAILCHAR = rf"{_TUTF1}|{_UTFMB}" _STRING_RE = re.compile( rf""" ( ({_LEADCHAR}|{_PAIR}) ( ({_STRINGCHAR}|{_PAIR})* ({_TRAILCHAR}|{_PAIR}) )? )? """, re.VERBOSE, ) _HEXSTRING_RE = re.compile(r"#([\da-zA-Z]{2})+") def __init__(self, data: str, attr_name_overrides: _NameOidMap) -> None: self._data = data self._idx = 0 self._attr_name_overrides = attr_name_overrides def _has_data(self) -> bool: return self._idx < len(self._data) def _peek(self) -> str | None: if self._has_data(): return self._data[self._idx] return None def _read_char(self, ch: str) -> None: if self._peek() != ch: raise ValueError self._idx += 1 def _read_re(self, pat) -> str: match = pat.match(self._data, pos=self._idx) if match is None: raise ValueError val = match.group() self._idx += len(val) return val def parse(self) -> Name: """ Parses the `data` string and converts it to a Name. According to RFC4514 section 2.1 the RDNSequence must be reversed when converting to string representation. So, when we parse it, we need to reverse again to get the RDNs on the correct order. 
""" if not self._has_data(): return Name([]) rdns = [self._parse_rdn()] while self._has_data(): self._read_char(",") rdns.append(self._parse_rdn()) return Name(reversed(rdns)) def _parse_rdn(self) -> RelativeDistinguishedName: nas = [self._parse_na()] while self._peek() == "+": self._read_char("+") nas.append(self._parse_na()) return RelativeDistinguishedName(nas) def _parse_na(self) -> NameAttribute[str]: try: oid_value = self._read_re(self._OID_RE) except ValueError: name = self._read_re(self._DESCR_RE) oid = self._attr_name_overrides.get( name, _NAME_TO_NAMEOID.get(name) ) if oid is None: raise ValueError else: oid = ObjectIdentifier(oid_value) self._read_char("=") if self._peek() == "#": value = self._read_re(self._HEXSTRING_RE) value = binascii.unhexlify(value[1:]).decode() else: raw_value = self._read_re(self._STRING_RE) value = _unescape_dn_value(raw_value) return NameAttribute(oid, value)
_RFC4514NameParser
python
airbytehq__airbyte
airbyte-integrations/connectors/source-genesys/source_genesys/source.py
{ "start": 1901, "end": 2125 }
class ____(GenesysStream): """ API Docs: https://developer.genesys.cloud/routing/routing/ """ primary_key = "id" def path(self, **kwargs) -> str: return "routing/assessments"
RoutingOutboundEvents
python
wandb__wandb
wandb/vendor/pygments/styles/algol.py
{ "start": 1372, "end": 2263 }
class ____(Style): background_color = "#ffffff" default_style = "" styles = { Comment: "italic #888", Comment.Preproc: "bold noitalic #888", Comment.Special: "bold noitalic #888", Keyword: "underline bold", Keyword.Declaration: "italic", Name.Builtin: "bold italic", Name.Builtin.Pseudo: "bold italic", Name.Namespace: "bold italic #666", Name.Class: "bold italic #666", Name.Function: "bold italic #666", Name.Variable: "bold italic #666", Name.Constant: "bold italic #666", Operator.Word: "bold", String: "italic #666", Error: "border:#FF0000" }
AlgolStyle
python
tensorflow__tensorflow
tensorflow/python/util/dispatch_test.py
{ "start": 2047, "end": 2862 }
class ____(object): """A fake composite tensor class, for testing type-based dispatching.""" def __init__(self, tensor, score): self.tensor = ops.convert_to_tensor(tensor) self.score = score @tf_export("test_op") @dispatch.add_dispatch_support def test_op(x, y, z): """A fake op for testing dispatch of Python ops.""" return x + (2 * y) + (3 * z) @tf_export("test_op_with_optional") @dispatch.add_dispatch_support def test_op_with_optional(x, y, z, optional=None): """A fake op for testing dispatch of Python ops.""" del optional return x + (2 * y) + (3 * z) @tf_export("test_op_with_kwonly") @dispatch.add_dispatch_support def test_op_with_kwonly(*, x, y, z, optional=None): """A fake op for testing dispatch of Python ops.""" del optional return x + (2 * y) + (3 * z)
CustomTensor
python
PrefectHQ__prefect
src/prefect/transactions.py
{ "start": 1105, "end": 1211 }
class ____(AutoEnum): READ_COMMITTED = AutoEnum.auto() SERIALIZABLE = AutoEnum.auto()
IsolationLevel
python
getsentry__sentry
src/sentry/grouping/variants.py
{ "start": 547, "end": 693 }
class ____(TypedDict): values: list[str] client_values: NotRequired[list[str]] matched_rule: NotRequired[str]
FingerprintVariantMetadata
python
gevent__gevent
src/gevent/tests/test__pywsgi.py
{ "start": 56251, "end": 58190 }
class ____(TestCase): validator = None def application(self, environ, start_response): content_length = int(environ['CONTENT_LENGTH']) if content_length > 1024: start_response('417 Expectation Failed', [('Content-Length', '7'), ('Content-Type', 'text/plain')]) return [b'failure'] # pywsgi did sent a "100 continue" for each read # see http://code.google.com/p/gevent/issues/detail?id=93 text = environ['wsgi.input'].read(1) text += environ['wsgi.input'].read(content_length - 1) start_response('200 OK', [('Content-Length', str(len(text))), ('Content-Type', 'text/plain')]) return [text] def test_continue(self): with self.makefile() as fd: padding = 'a'*1025 fd.write('PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: 1025\r\nExpect: 100-continue\r\n\r\n' + padding) read_http(fd, code=417, body="failure") fd.write('PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: 7\r\nExpect: 100-continue\r\n\r\ntesting') read_http(fd, code=100) read_http(fd, body="testing") with self.makefile() as fd: padding = 'a'*947 body = 'PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: {}\r\nExpect: 100-continue\r\n\r\n{}'.format(len(padding), padding) # The following content should be regarded as one request. fd.write('PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: {}\r\nExpect: 100-continue\r\n\r\n{}'.format(len(body), body)) read_http(fd, code=417, body="failure") fd.write('PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: 7\r\nExpect: 100-continue\r\n\r\ntesting') read_http(fd, code=100) # If there was a smuggling issue beforehand, what is obtained here will be aaa..aaa read_http(fd, body="testing")
Expect100ContinueTests
python
ray-project__ray
python/ray/llm/_internal/batch/stages/tokenize_stage.py
{ "start": 367, "end": 1878 }
class ____(StatefulStageUDF): def __init__( self, data_column: str, expected_input_keys: List[str], model: str, ): """ Initialize the TokenizeUDF. Args: data_column: The data column name. expected_input_keys: The expected input keys of the stage. model: The model to use for the chat template. """ from transformers import AutoTokenizer super().__init__(data_column, expected_input_keys) model_path = download_model_files( model_id=model, mirror_config=None, download_model=NodeModelDownloadable.TOKENIZER_ONLY, download_extra_files=False, ) self.tokenizer = get_cached_tokenizer( AutoTokenizer.from_pretrained( model_path, trust_remote_code=True, ) ) async def udf(self, batch: List[Dict[str, Any]]) -> AsyncIterator[Dict[str, Any]]: """ Tokenize the given batch. Args: batch: A list of rows to send. Yields: A generator of rows with the tokenized prompt. """ for row, prompt_token_ids in zip( batch, self.tokenizer([row["prompt"] for row in batch])["input_ids"], ): yield { self.IDX_IN_BATCH_COLUMN: row[self.IDX_IN_BATCH_COLUMN], "tokenized_prompt": prompt_token_ids, }
TokenizeUDF
python
pypa__setuptools
setuptools/_distutils/command/build.py
{ "start": 356, "end": 5923 }
class ____(Command): description = "build everything needed to install" user_options = [ ('build-base=', 'b', "base directory for build library"), ('build-purelib=', None, "build directory for platform-neutral distributions"), ('build-platlib=', None, "build directory for platform-specific distributions"), ( 'build-lib=', None, "build directory for all distribution (defaults to either build-purelib or build-platlib", ), ('build-scripts=', None, "build directory for scripts"), ('build-temp=', 't', "temporary build directory"), ( 'plat-name=', 'p', f"platform name to build for, if supported [default: {get_platform()}]", ), ('compiler=', 'c', "specify the compiler type"), ('parallel=', 'j', "number of parallel build jobs"), ('debug', 'g', "compile extensions and libraries with debugging information"), ('force', 'f', "forcibly build everything (ignore file timestamps)"), ('executable=', 'e', "specify final destination interpreter path (build.py)"), ] boolean_options: ClassVar[list[str]] = ['debug', 'force'] help_options: ClassVar[list[tuple[str, str | None, str, Callable[[], object]]]] = [ ('help-compiler', None, "list available compilers", show_compilers), ] def initialize_options(self): self.build_base = 'build' # these are decided only after 'build_base' has its final value # (unless overridden by the user or client) self.build_purelib = None self.build_platlib = None self.build_lib = None self.build_temp = None self.build_scripts = None self.compiler = None self.plat_name = None self.debug = None self.force = False self.executable = None self.parallel = None def finalize_options(self) -> None: # noqa: C901 if self.plat_name is None: self.plat_name = get_platform() else: # plat-name only supported for windows (other platforms are # supported via ./configure flags, if at all). Avoid misleading # other platforms. 
if os.name != 'nt': raise DistutilsOptionError( "--plat-name only supported on Windows (try " "using './configure --help' on your platform)" ) plat_specifier = f".{self.plat_name}-{sys.implementation.cache_tag}" # Python 3.13+ with --disable-gil shouldn't share build directories if sysconfig.get_config_var('Py_GIL_DISABLED'): plat_specifier += 't' # Make it so Python 2.x and Python 2.x with --with-pydebug don't # share the same build directories. Doing so confuses the build # process for C modules if hasattr(sys, 'gettotalrefcount'): plat_specifier += '-pydebug' # 'build_purelib' and 'build_platlib' just default to 'lib' and # 'lib.<plat>' under the base build directory. We only use one of # them for a given distribution, though -- if self.build_purelib is None: self.build_purelib = os.path.join(self.build_base, 'lib') if self.build_platlib is None: self.build_platlib = os.path.join(self.build_base, 'lib' + plat_specifier) # 'build_lib' is the actual directory that we will use for this # particular module distribution -- if user didn't supply it, pick # one of 'build_purelib' or 'build_platlib'. if self.build_lib is None: if self.distribution.has_ext_modules(): self.build_lib = self.build_platlib else: self.build_lib = self.build_purelib # 'build_temp' -- temporary directory for compiler turds, # "build/temp.<plat>" if self.build_temp is None: self.build_temp = os.path.join(self.build_base, 'temp' + plat_specifier) if self.build_scripts is None: self.build_scripts = os.path.join( self.build_base, f'scripts-{sys.version_info.major}.{sys.version_info.minor}', ) if self.executable is None and sys.executable: self.executable = os.path.normpath(sys.executable) if isinstance(self.parallel, str): try: self.parallel = int(self.parallel) except ValueError: raise DistutilsOptionError("parallel should be an integer") def run(self) -> None: # Run all relevant sub-commands. 
This will be some subset of: # - build_py - pure Python modules # - build_clib - standalone C libraries # - build_ext - Python extensions # - build_scripts - (Python) scripts for cmd_name in self.get_sub_commands(): self.run_command(cmd_name) # -- Predicates for the sub-command list --------------------------- def has_pure_modules(self): return self.distribution.has_pure_modules() def has_c_libraries(self): return self.distribution.has_c_libraries() def has_ext_modules(self): return self.distribution.has_ext_modules() def has_scripts(self): return self.distribution.has_scripts() sub_commands = [ ('build_py', has_pure_modules), ('build_clib', has_c_libraries), ('build_ext', has_ext_modules), ('build_scripts', has_scripts), ]
build
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 91117, "end": 91548 }
class ____(sgqlc.types.Input): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("branch_protection_rule_id", "client_mutation_id") branch_protection_rule_id = sgqlc.types.Field( sgqlc.types.non_null(ID), graphql_name="branchProtectionRuleId" ) client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
DeleteBranchProtectionRuleInput
python
pytorch__pytorch
test/test_tensorexpr.py
{ "start": 399, "end": 1089 }
class ____(JitTestCase): def setUp(self): super().setUp() self.tensorexpr_options = TensorExprTestOptions() self.devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda'] self.dtypes = [torch.float32, torch.bfloat16] if LLVM_ENABLED else [torch.float32] def tearDown(self): self.tensorexpr_options.restore() super().tearDown() def assertLastGraphAllFused(self): self.assertAllFused(torch.jit.last_executed_optimized_graph()) def warmup_and_run_forward(f, *args): for _ in range(torch._C._jit_get_num_profiled_runs() + 1): results = f(*args) return results @skipIfTorchDynamo()
BaseTestClass
python
scrapy__scrapy
tests/test_spidermiddleware_output_chain.py
{ "start": 4020, "end": 4425 }
class ____(GeneratorCallbackSpider): name = "GeneratorCallbackSpiderMiddlewareRightAfterSpider" custom_settings = { "SPIDER_MIDDLEWARES": { LogExceptionMiddleware: 100000, }, } # ================================================================================ # (3) exceptions from a spider callback (not a generator)
GeneratorCallbackSpiderMiddlewareRightAfterSpider
python
sphinx-doc__sphinx
sphinx/domains/cpp/_ast.py
{ "start": 106842, "end": 109917 }
class ____(ASTDeclarator): def __init__(self, inner: ASTDeclarator, next: ASTDeclarator) -> None: assert inner assert next self.inner = inner self.next = next # TODO: we assume the name, params, and qualifiers are in inner def __eq__(self, other: object) -> bool: if not isinstance(other, ASTDeclaratorParen): return NotImplemented return self.inner == other.inner and self.next == other.next def __hash__(self) -> int: return hash((self.inner, self.next)) @property def name(self) -> ASTNestedName: return self.inner.name @name.setter def name(self, name: ASTNestedName) -> None: self.inner.name = name @property def isPack(self) -> bool: return self.inner.isPack or self.next.isPack @property def function_params(self) -> list[ASTFunctionParameter]: return self.inner.function_params @property def trailingReturn(self) -> ASTType: return self.inner.trailingReturn def require_space_after_declSpecs(self) -> bool: return True def _stringify(self, transform: StringifyTransform) -> str: res = ['('] res.extend(( transform(self.inner), ')', transform(self.next), )) return ''.join(res) def get_modifiers_id(self, version: int) -> str: return self.inner.get_modifiers_id(version) def get_param_id(self, version: int) -> str: # only the parameters (if any) return self.inner.get_param_id(version) def get_ptr_suffix_id(self, version: int) -> str: if version == 1: raise NoOldIdError # TODO: was this implemented before? 
ptr_suffix_id_next = self.next.get_ptr_suffix_id(version) ptr_suffix_id_inner = self.inner.get_ptr_suffix_id(version) return ptr_suffix_id_next + ptr_suffix_id_inner ptr_suffix_id_inner = self.inner.get_ptr_suffix_id(version) ptr_suffix_id_next = self.next.get_ptr_suffix_id(version) return ptr_suffix_id_inner + ptr_suffix_id_next def get_type_id(self, version: int, returnTypeId: str) -> str: assert version >= 2 # ReturnType (inner)next, so 'inner' returns everything outside next_id = self.next.get_type_id(version, returnTypeId) return self.inner.get_type_id(version, returnTypeId=next_id) def is_function_type(self) -> bool: return self.inner.is_function_type() def describe_signature( self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol ) -> None: verify_description_mode(mode) signode += addnodes.desc_sig_punctuation('(', '(') self.inner.describe_signature(signode, mode, env, symbol) signode += addnodes.desc_sig_punctuation(')', ')') self.next.describe_signature(signode, 'noneIsName', env, symbol) # Type and initializer stuff ##############################################################################################
ASTDeclaratorParen
python
sympy__sympy
sympy/simplify/hyperexpand.py
{ "start": 40389, "end": 40651 }
class ____(Operator): """ Increment a lower b index. """ def __init__(self, bi): bi = sympify(bi) self._poly = Poly(-bi + _x, _x) def __str__(self): return '<Increment lower b=%s.>' % (-self._poly.all_coeffs()[1])
MeijerShiftC
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeVarDefault2.py
{ "start": 407, "end": 435 }
class ____[T = 3]: ...
ClassT1
python
huggingface__transformers
src/transformers/models/rwkv/modeling_rwkv.py
{ "start": 8316, "end": 11589 }
class ____(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.config = config kernel_loaded = rwkv_cuda_kernel is not None and rwkv_cuda_kernel.max_seq_length == config.context_length if is_ninja_available() and is_torch_cuda_available() and not kernel_loaded: try: load_wkv_cuda_kernel(config.context_length) except Exception: logger.info("Could not load the custom CUDA kernel for RWKV attention.") self.layer_id = layer_id hidden_size = config.hidden_size attention_hidden_size = ( config.attention_hidden_size if config.attention_hidden_size is not None else hidden_size ) self.attention_hidden_size = attention_hidden_size self.time_decay = nn.Parameter(torch.empty(attention_hidden_size)) self.time_first = nn.Parameter(torch.empty(attention_hidden_size)) self.time_mix_key = nn.Parameter(torch.empty(1, 1, hidden_size)) self.time_mix_value = nn.Parameter(torch.empty(1, 1, hidden_size)) self.time_mix_receptance = nn.Parameter(torch.empty(1, 1, hidden_size)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.key = nn.Linear(hidden_size, attention_hidden_size, bias=False) self.value = nn.Linear(hidden_size, attention_hidden_size, bias=False) self.receptance = nn.Linear(hidden_size, attention_hidden_size, bias=False) self.output = nn.Linear(attention_hidden_size, hidden_size, bias=False) # TODO: maybe jit, otherwise move inside forward def extract_key_value(self, hidden, state=None): # Mix hidden with the previous timestep to produce key, value, receptance if hidden.size(1) == 1 and state is not None: shifted = state[1][:, :, self.layer_id] else: shifted = self.time_shift(hidden) if state is not None: shifted[:, 0] = state[1][:, :, self.layer_id] key = hidden * self.time_mix_key + shifted * (1 - self.time_mix_key) value = hidden * self.time_mix_value + shifted * (1 - self.time_mix_value) receptance = hidden * self.time_mix_receptance + shifted * (1 - self.time_mix_receptance) key = self.key(key) value = self.value(value) receptance = 
torch.sigmoid(self.receptance(receptance)) if state is not None: state[1][:, :, self.layer_id] = hidden[:, -1] return receptance, key, value, state def forward(self, hidden, state=None, use_cache=False): receptance, key, value, state = self.extract_key_value(hidden, state=state) layer_state = tuple(s[:, :, self.layer_id] for s in state[2:]) if state is not None else None rwkv, layer_state = rwkv_linear_attention( self.time_decay, self.time_first, key, value, state=layer_state, return_state=use_cache, ) if layer_state is not None: state[2][:, :, self.layer_id] = layer_state[0] state[3][:, :, self.layer_id] = layer_state[1] state[4][:, :, self.layer_id] = layer_state[2] return self.output(receptance * rwkv), state
RwkvSelfAttention
python
pytorch__pytorch
test/functorch/test_memory_efficient_fusion.py
{ "start": 11993, "end": 12596 }
class ____(TestCase): def test_random(self): def f(x): vals = [x] ops = [torch.clone, torch.cos, torch.tanh, torch.nn.functional.gelu] for _ in range(100): new_val = random.choice(ops)(random.choice(vals)) vals.append(new_val) return vals[-1] fx_g = fx.symbolic_trace(f) fx_g.graph.eliminate_dead_code() fx_g.recompile() t = torch.randn(2, 2) for _ in range(30): check(fx_g, t, -1, graph_input=True) if __name__ == "__main__": run_tests()
RandomOpTestCase
python
PrefectHQ__prefect
src/integrations/prefect-dbt/prefect_dbt/cloud/exceptions.py
{ "start": 1004, "end": 1114 }
class ____(DbtCloudException): """Raised when a triggered job run is not complete."""
DbtCloudJobRunIncomplete
python
getsentry__sentry
src/sentry/api/endpoints/organization_trace.py
{ "start": 910, "end": 3906 }
class ____(OrganizationEventsV2EndpointBase): """Replaces OrganizationEventsTraceEndpoint""" publish_status = { "GET": ApiPublishStatus.PRIVATE, } def get_projects( self, request: HttpRequest, organization: Organization | RpcOrganization, force_global_perms: bool = False, include_all_accessible: bool = False, project_ids: set[int] | None = None, project_slugs: set[str] | None = None, ) -> list[Project]: """The trace endpoint always wants to get all projects regardless of what's passed into the API This is because a trace can span any number of projects in an organization. But we still want to use the get_projects function to check for any permissions. So we'll just pass project_ids=-1 everytime which is what would be sent if we wanted all projects""" return super().get_projects( request, organization, project_ids={-1}, project_slugs=None, include_all_accessible=True, ) @sentry_sdk.tracing.trace def query_trace_data( self, snuba_params: SnubaParams, trace_id: str, error_id: str | None = None, additional_attributes: list[str] | None = None, include_uptime: bool = False, ) -> list[SerializedEvent]: return query_trace_data( snuba_params, trace_id, error_id, additional_attributes, include_uptime ) def has_feature(self, organization: Organization, request: Request) -> bool: return bool( features.has("organizations:trace-spans-format", organization, actor=request.user) ) def get(self, request: Request, organization: Organization, trace_id: str) -> HttpResponse: if not self.has_feature(organization, request): return Response(status=404) try: snuba_params = self.get_snuba_params(request, organization) except NoProjects: return Response(status=404) additional_attributes = request.GET.getlist("additional_attributes", []) include_uptime = request.GET.get("include_uptime", "0") == "1" error_id = request.GET.get("errorId") if error_id is not None and not is_event_id(error_id): raise ParseError(f"eventId: {error_id} needs to be a valid uuid") def data_fn(offset: int, limit: int) 
-> list[SerializedEvent]: """offset and limit don't mean anything on this endpoint currently""" with handle_query_errors(): update_snuba_params_with_timestamp(request, snuba_params) spans = self.query_trace_data( snuba_params, trace_id, error_id, additional_attributes, include_uptime ) return spans return self.paginate( request=request, paginator=GenericOffsetPaginator(data_fn=data_fn), )
OrganizationTraceEndpoint
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/methodOverride3.py
{ "start": 293, "end": 420 }
class ____: def func1(self, a: int, b: int = 3) -> str: ... # This should generate an error because func1 is incompatible.
A2
python
coleifer__peewee
tests/sqlite.py
{ "start": 91515, "end": 91692 }
class ____(TestModel): key = TextField(primary_key=True) value = IntegerField() @skip_unless(database.server_version >= (3, 35, 0), 'sqlite returning clause required')
KVR
python
django__django
tests/m2m_regress/models.py
{ "start": 1168, "end": 1212 }
class ____(SelfRefer): pass
SelfReferChild
python
doocs__leetcode
solution/1200-1299/1255.Maximum Score Words Formed by Letters/Solution.py
{ "start": 0, "end": 491 }
class ____: def maxScoreWords( self, words: List[str], letters: List[str], score: List[int] ) -> int: cnt = Counter(letters) n = len(words) ans = 0 for i in range(1 << n): cur = Counter(''.join([words[j] for j in range(n) if i >> j & 1])) if all(v <= cnt[c] for c, v in cur.items()): t = sum(v * score[ord(c) - ord('a')] for c, v in cur.items()) ans = max(ans, t) return ans
Solution
python
pytorch__pytorch
test/distributed/_composable/fsdp/test_fully_shard_autograd.py
{ "start": 10091, "end": 11171 }
class ____(FSDPTestMultiThread): @property def world_size(self) -> int: return 2 @skip_if_lt_x_gpu(1) def test_post_acc_grad_hook_runs(self): param_name_to_hook_count = collections.defaultdict(int) def hook(param_name: str, param: torch.Tensor) -> None: nonlocal param_name_to_hook_count param_name_to_hook_count[param_name] += 1 model = MLP(8) for module in (model.in_proj, model.out_proj, model): fully_shard(module) for param_name, param in model.named_parameters(): param_hook = functools.partial(hook, param_name) param.register_post_accumulate_grad_hook(param_hook) inp = torch.randn((2, 8), device=device_type) model(inp).sum().backward() param_names = {param_name for param_name, _ in model.named_parameters()} self.assertEqual(param_names, set(param_name_to_hook_count.keys())) for count in param_name_to_hook_count.values(): self.assertEqual(count, 1)
TestFullyShardPostAccGradHookMultiThread
python
google__pytype
pytype/tests/test_typing2.py
{ "start": 23492, "end": 26915 }
class ____(test_base.BaseTest): """Typing tests (Python 3).""" def test_namedtuple_item(self): with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ from typing import NamedTuple class Ret(NamedTuple): x: int y: str def f() -> Ret: ... """, ) ty = self.Infer( """ import foo w = foo.f()[-1] x = foo.f()[0] y = foo.f()[1] z = foo.f()[2] # out of bounds, fall back to the combined element type """, pythonpath=[d.path], ) self.assertTypesMatchPytd( ty, """ import foo from typing import Union w: str x: int y: str z: Union[int, str] """, ) def test_import_all(self): python = [ "from typing import * # pytype: disable=not-supported-yet", ] + pep484.ALL_TYPING_NAMES ty = self.Infer("\n".join(python)) self.assertTypesMatchPytd(ty, "") def test_callable_func_name(self): self.Check(""" from typing import Any, Callable def foo(fn: Callable[[Any], Any]) -> str: return fn.__qualname__ """) def test_classvar(self): ty = self.Infer(""" from typing import ClassVar class A: x: ClassVar[int] = 5 print(A.x + 3) # make sure using a ClassVar[int] as an int works """) self.assertTypesMatchPytd( ty, """ from typing import ClassVar class A: x: ClassVar[int] """, ) def test_uninitialized_classvar(self): ty = self.Infer(""" from typing import ClassVar class A: x: ClassVar[int] """) self.assertTypesMatchPytd( ty, """ from typing import ClassVar class A: x: ClassVar[int] """, ) def test_pyi_classvar_of_union(self): with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ from typing import ClassVar, Optional class Foo: x: ClassVar[Optional[str]] """, ) self.Check( """ import foo from typing import Optional def f(x: Optional[str]): pass f(foo.Foo.x) """, pythonpath=[d.path], ) def test_ordered_dict(self): self.Check(""" import collections from typing import OrderedDict def f(x: OrderedDict[str, int]): ... f(collections.OrderedDict(a=0)) def g(x: collections.OrderedDict[str, int]): ... 
g(OrderedDict(a=0)) """) def test_instantiate_ordered_dict(self): self.Check(""" from typing import OrderedDict OrderedDict() """) def test_typed_dict(self): with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ from typing_extensions import TypedDict X = TypedDict('X', {'a': int}) """, ) self.CheckWithErrors( """ import foo from typing import Dict def f1(x: Dict[str, int]): pass def f2(x: Dict[int, str]): pass def f3(x: foo.X): pass x = None # type: foo.X f1(x) # okay f2(x) # wrong-arg-types f3({'a': 0}) # okay f3({0: 'a'}) # wrong-arg-types """, pythonpath=[d.path], )
TypingTestPython3Feature
python
getsentry__sentry
src/sentry_plugins/github/webhooks/integration.py
{ "start": 623, "end": 1479 }
class ____(GithubWebhookBase): _handlers = { "push": PushEventWebhook, "installation": InstallationEventWebhook, "installation_repositories": InstallationRepositoryEventWebhook, } @method_decorator(csrf_exempt) def dispatch(self, request: HttpRequest, *args, **kwargs) -> HttpResponseBase: if request.method != "POST": return HttpResponse(status=405) return super().dispatch(request, *args, **kwargs) def get_secret(self, organization: Organization) -> str | None: return options.get("github.integration-hook-secret") def post(self, request: Request) -> HttpResponse: logger.error( "github_plugin.install.deprecation_check", extra={"meta": request.META}, ) return self.handle(request)
GithubPluginIntegrationsWebhookEndpoint
python
dagster-io__dagster
python_modules/dagster-test/dagster_test/components/complex_schema_asset.py
{ "start": 453, "end": 959 }
class ____(Component, Resolvable): """An asset that has a complex schema.""" value: str list_value: list[str] obj_value: dict[str, str] op: Optional[OpSpec] = None asset_attributes: Optional[ResolvedAssetAttributes] = None def build_defs(self, context: ComponentLoadContext) -> Definitions: @asset(spec=self.asset_attributes) def dummy(context: AssetExecutionContext): return self.value return Definitions(assets=[dummy])
ComplexAssetComponent
python
pytorch__pytorch
torch/_dynamo/exc.py
{ "start": 7258, "end": 7383 }
class ____(Unsupported): pass # debug exception thrown when tracing torch._dynamo.step_unsupported()
RecompileLimitExceeded
python
django-haystack__django-haystack
haystack/indexes.py
{ "start": 13939, "end": 14879 }
class ____(SearchIndex): text = CharField(document=True, use_template=True) # End SearchIndexes # Begin ModelSearchIndexes def index_field_from_django_field(f, default=CharField): """ Returns the Haystack field type that would likely be associated with each Django type. """ result = default if f.get_internal_type() in ("DateField", "DateTimeField"): result = DateTimeField elif f.get_internal_type() in ("BooleanField", "NullBooleanField"): result = BooleanField elif f.get_internal_type() in ("CommaSeparatedIntegerField",): result = MultiValueField elif f.get_internal_type() in ("DecimalField", "FloatField"): result = FloatField elif f.get_internal_type() in ( "IntegerField", "PositiveIntegerField", "PositiveSmallIntegerField", "SmallIntegerField", ): result = IntegerField return result
BasicSearchIndex
python
django__django
tests/delete_regress/tests.py
{ "start": 777, "end": 2243 }
class ____(TransactionTestCase): available_apps = ["delete_regress"] def setUp(self): # Create a second connection to the default database self.conn2 = connection.copy() self.conn2.set_autocommit(False) # Close down the second connection. self.addCleanup(self.conn2.close) self.addCleanup(self.conn2.rollback) def test_concurrent_delete(self): """Concurrent deletes don't collide and lock the database (#9479).""" with transaction.atomic(): Book.objects.create(id=1, pagecount=100) Book.objects.create(id=2, pagecount=200) Book.objects.create(id=3, pagecount=300) with transaction.atomic(): # Start a transaction on the main connection. self.assertEqual(3, Book.objects.count()) # Delete something using another database connection. with self.conn2.cursor() as cursor2: cursor2.execute("DELETE from delete_regress_book WHERE id = 1") self.conn2.commit() # In the same transaction on the main connection, perform a # queryset delete that covers the object deleted with the other # connection. This causes an infinite loop under MySQL InnoDB # unless we keep track of already deleted objects. Book.objects.filter(pagecount__lt=250).delete() self.assertEqual(1, Book.objects.count())
DeleteLockingTest
python
django-extensions__django-extensions
tests/management/test_modelviz.py
{ "start": 129, "end": 2354 }
class ____(SimpleTestCase): def test_generate_graph_data_can_render_label(self): app_labels = ["auth"] data = generate_graph_data(app_labels) models = data["graphs"][0]["models"] user_data = [x for x in models if x["name"] == "User"][0] relation_labels = [x["label"] for x in user_data["relations"]] self.assertIn("groups (user)", relation_labels) def test_render_unicode_field_label(self): app_labels = ["django_extensions"] data = generate_graph_data(app_labels, verbose_names=True) models = data["graphs"][0]["models"] model = [x for x in models if x["name"] == "UnicodeVerboseNameModel"][0] fields = dict((_f["name"], _f["label"]) for _f in model["fields"]) expected = { "id": "ID", "cafe": "Café", "parent_cafe": "Café latte", } self.assertEqual(expected, fields) def test_on_delete_color_coding(self): app_labels = ["django_extensions"] data = generate_graph_data(app_labels, color_code_deletions=True) models = data["graphs"][0]["models"] for model in models: relations = [ x for x in model["relations"] if x["type"] in ("ForeignKey", "OneToOneField") ] for relation in relations: field = [ x["field"] for x in model["fields"] if x["name"] == relation["name"] ][0] on_delete = getattr(field.remote_field, "on_delete", None) expected_color = ON_DELETE_COLORS[on_delete] self.assertIn("color={}".format(expected_color), relation["arrows"]) def test_disabled_on_delete_color_coding(self): app_labels = ["django_extensions"] data = generate_graph_data(app_labels) models = data["graphs"][0]["models"] for model in models: relations = [ x for x in model["relations"] if x["type"] in ("ForeignKey", "OneToOneField") ] for relation in relations: self.assertNotIn("color=", relation["arrows"])
ModelVizTests
python
sqlalchemy__sqlalchemy
test/ext/declarative/test_reflection.py
{ "start": 1559, "end": 1742 }
class ____(DeclarativeReflectionBase): def teardown_test(self): super().teardown_test() _DeferredDeclarativeConfig._configs.clear() Base = None
DeferredReflectBase
python
tornadoweb__tornado
tornado/testing.py
{ "start": 2287, "end": 13493 }
class ____(unittest.TestCase): """`~unittest.TestCase` subclass for testing `.IOLoop`-based asynchronous code. The unittest framework is synchronous, so the test must be complete by the time the test method returns. This means that asynchronous code cannot be used in quite the same way as usual and must be adapted to fit. To write your tests with coroutines, decorate your test methods with `tornado.testing.gen_test` instead of `tornado.gen.coroutine`. This class also provides the (deprecated) `stop()` and `wait()` methods for a more manual style of testing. The test method itself must call ``self.wait()``, and asynchronous callbacks should call ``self.stop()`` to signal completion. By default, a new `.IOLoop` is constructed for each test and is available as ``self.io_loop``. If the code being tested requires a reused global `.IOLoop`, subclasses should override `get_new_ioloop` to return it, although this is deprecated as of Tornado 6.3. The `.IOLoop`'s ``start`` and ``stop`` methods should not be called directly. Instead, use `self.stop <stop>` and `self.wait <wait>`. Arguments passed to ``self.stop`` are returned from ``self.wait``. It is possible to have multiple ``wait``/``stop`` cycles in the same test. Example:: # This test uses coroutine style. class MyTestCase(AsyncTestCase): @tornado.testing.gen_test def test_http_fetch(self): client = AsyncHTTPClient() response = yield client.fetch("http://www.tornadoweb.org") # Test contents of response self.assertIn("FriendFeed", response.body) # This test uses argument passing between self.stop and self.wait. 
class MyTestCase2(AsyncTestCase): def test_http_fetch(self): client = AsyncHTTPClient() client.fetch("http://www.tornadoweb.org/", self.stop) response = self.wait() # Test contents of response self.assertIn("FriendFeed", response.body) """ def __init__(self, methodName: str = "runTest") -> None: super().__init__(methodName) self.__stopped = False self.__running = False self.__failure = None # type: Optional[_ExcInfoTuple] self.__stop_args = None # type: Any self.__timeout = None # type: Optional[object] # Not used in this class itself, but used by @gen_test self._test_generator = None # type: Optional[Union[Generator, Coroutine]] def setUp(self) -> None: py_ver = sys.version_info if ((3, 10, 0) <= py_ver < (3, 10, 9)) or ((3, 11, 0) <= py_ver <= (3, 11, 1)): # Early releases in the Python 3.10 and 3.1 series had deprecation # warnings that were later reverted; we must suppress them here. setup_with_context_manager(self, warnings.catch_warnings()) warnings.filterwarnings( "ignore", message="There is no current event loop", category=DeprecationWarning, module=r"tornado\..*", ) super().setUp() if type(self).get_new_ioloop is not AsyncTestCase.get_new_ioloop: warnings.warn("get_new_ioloop is deprecated", DeprecationWarning) self.io_loop = self.get_new_ioloop() asyncio.set_event_loop(self.io_loop.asyncio_loop) # type: ignore[attr-defined] def tearDown(self) -> None: # Native coroutines tend to produce warnings if they're not # allowed to run to completion. It's difficult to ensure that # this always happens in tests, so cancel any tasks that are # still pending by the time we get here. asyncio_loop = self.io_loop.asyncio_loop # type: ignore tasks = asyncio.all_tasks(asyncio_loop) # Tasks that are done may still appear here and may contain # non-cancellation exceptions, so filter them out. 
tasks = [t for t in tasks if not t.done()] # type: ignore for t in tasks: t.cancel() # Allow the tasks to run and finalize themselves (which means # raising a CancelledError inside the coroutine). This may # just transform the "task was destroyed but it is pending" # warning into a "uncaught CancelledError" warning, but # catching CancelledErrors in coroutines that may leak is # simpler than ensuring that no coroutines leak. if tasks: done, pending = self.io_loop.run_sync(lambda: asyncio.wait(tasks)) assert not pending # If any task failed with anything but a CancelledError, raise it. for f in done: try: f.result() except asyncio.CancelledError: pass # Clean up Subprocess, so it can be used again with a new ioloop. Subprocess.uninitialize() asyncio.set_event_loop(None) if not isinstance(self.io_loop, _NON_OWNED_IOLOOPS): # Try to clean up any file descriptors left open in the ioloop. # This avoids leaks, especially when tests are run repeatedly # in the same process with autoreload (because curl does not # set FD_CLOEXEC on its file descriptors) self.io_loop.close(all_fds=True) super().tearDown() # In case an exception escaped or the StackContext caught an exception # when there wasn't a wait() to re-raise it, do so here. # This is our last chance to raise an exception in a way that the # unittest machinery understands. self.__rethrow() def get_new_ioloop(self) -> IOLoop: """Returns the `.IOLoop` to use for this test. By default, a new `.IOLoop` is created for each test. Subclasses may override this method to return `.IOLoop.current()` if it is not appropriate to use a new `.IOLoop` in each tests (for example, if there are global singletons using the default `.IOLoop`) or if a per-test event loop is being provided by another system (such as ``pytest-asyncio``). .. deprecated:: 6.3 This method will be removed in Tornado 7.0. 
""" return IOLoop(make_current=False) def _handle_exception( self, typ: Type[Exception], value: Exception, tb: TracebackType ) -> bool: if self.__failure is None: self.__failure = (typ, value, tb) else: app_log.error( "multiple unhandled exceptions in test", exc_info=(typ, value, tb) ) self.stop() return True def __rethrow(self) -> None: if self.__failure is not None: failure = self.__failure self.__failure = None raise_exc_info(failure) def run( self, result: Optional[unittest.TestResult] = None ) -> Optional[unittest.TestResult]: ret = super().run(result) # As a last resort, if an exception escaped super.run() and wasn't # re-raised in tearDown, raise it here. This will cause the # unittest run to fail messily, but that's better than silently # ignoring an error. self.__rethrow() return ret def _callTestMethod(self, method: Callable) -> None: """Run the given test method, raising an error if it returns non-None. Failure to decorate asynchronous test methods with ``@gen_test`` can lead to tests incorrectly passing. Remove this override when Python 3.10 support is dropped. This check (in the form of a DeprecationWarning) became a part of the standard library in 3.11. Note that ``_callTestMethod`` is not documented as a public interface. However, it is present in all supported versions of Python (3.8+), and if it goes away in the future that's OK because we can just remove this override as noted above. """ # Calling super()._callTestMethod would hide the return value, even in python 3.8-3.10 # where the check isn't being done for us. result = method() if isinstance(result, Generator) or inspect.iscoroutine(result): raise TypeError( "Generator and coroutine test methods should be" " decorated with tornado.testing.gen_test" ) elif result is not None: raise ValueError("Return value from test method ignored: %r" % result) def stop(self, _arg: Any = None, **kwargs: Any) -> None: """Stops the `.IOLoop`, causing one pending (or future) call to `wait()` to return. 
Keyword arguments or a single positional argument passed to `stop()` are saved and will be returned by `wait()`. .. deprecated:: 5.1 `stop` and `wait` are deprecated; use ``@gen_test`` instead. """ assert _arg is None or not kwargs self.__stop_args = kwargs or _arg if self.__running: self.io_loop.stop() self.__running = False self.__stopped = True def wait( self, condition: Optional[Callable[..., bool]] = None, timeout: Optional[float] = None, ) -> Any: """Runs the `.IOLoop` until stop is called or timeout has passed. In the event of a timeout, an exception will be thrown. The default timeout is 5 seconds; it may be overridden with a ``timeout`` keyword argument or globally with the ``ASYNC_TEST_TIMEOUT`` environment variable. If ``condition`` is not ``None``, the `.IOLoop` will be restarted after `stop()` until ``condition()`` returns ``True``. .. versionchanged:: 3.1 Added the ``ASYNC_TEST_TIMEOUT`` environment variable. .. deprecated:: 5.1 `stop` and `wait` are deprecated; use ``@gen_test`` instead. """ if timeout is None: timeout = get_async_test_timeout() if not self.__stopped: if timeout: def timeout_func() -> None: try: raise self.failureException( "Async operation timed out after %s seconds" % timeout ) except Exception: self.__failure = sys.exc_info() self.stop() self.__timeout = self.io_loop.add_timeout( self.io_loop.time() + timeout, timeout_func ) while True: self.__running = True self.io_loop.start() if self.__failure is not None or condition is None or condition(): break if self.__timeout is not None: self.io_loop.remove_timeout(self.__timeout) self.__timeout = None assert self.__stopped self.__stopped = False self.__rethrow() result = self.__stop_args self.__stop_args = None return result
AsyncTestCase
python
sphinx-doc__sphinx
sphinx/addnodes.py
{ "start": 10374, "end": 10723 }
class ____(desc_sig_element, _sig_element=True): """Node for a space in a signature.""" classes = ['w'] def __init__( self, rawsource: str = '', text: str = ' ', *children: Element, **attributes: Any, ) -> None: super().__init__(rawsource, text, *children, **attributes)
desc_sig_space
python
django-haystack__django-haystack
test_haystack/elasticsearch7_tests/test_backend.py
{ "start": 63580, "end": 67489 }
class ____(TestCase): def setUp(self): super().setUp() # Wipe it clean. clear_elasticsearch_index() # Stow. self.old_ui = connections["elasticsearch"].get_unified_index() self.ui = UnifiedIndex() self.smmi = Elasticsearch7FacetingMockSearchIndex() self.ui.build(indexes=[self.smmi]) connections["elasticsearch"]._index = self.ui self.sb = connections["elasticsearch"].get_backend() # Force the backend to rebuild the mapping each time. self.sb.existing_mapping = {} self.sb.setup() self.sample_objs = [] for i in range(1, 10): mock = AFourthMockModel() mock.id = i if i > 5: mock.editor = "George Taylor" else: mock.editor = "Perry White" if i % 2: mock.author = "Daniel Lindsley" else: mock.author = "Dan Watson" mock.pub_date = datetime.date(2013, 9, (i % 4) + 1) self.sample_objs.append(mock) def tearDown(self): connections["elasticsearch"]._index = self.old_ui super().tearDown() def test_facet(self): self.sb.update(self.smmi, self.sample_objs) counts = ( SearchQuerySet("elasticsearch") .facet("author") .facet("editor") .facet_counts() ) self.assertEqual( counts["fields"]["author"], [("Daniel Lindsley", 5), ("Dan Watson", 4)] ) self.assertEqual( counts["fields"]["editor"], [("Perry White", 5), ("George Taylor", 4)] ) counts = ( SearchQuerySet("elasticsearch") .filter(content="white") .facet("facet_field", order="reverse_count") .facet_counts() ) self.assertEqual( counts["fields"]["facet_field"], [("Dan Watson", 2), ("Daniel Lindsley", 3)] ) def test_multiple_narrow(self): self.sb.update(self.smmi, self.sample_objs) counts = ( SearchQuerySet("elasticsearch") .narrow('editor_exact:"Perry White"') .narrow('author_exact:"Daniel Lindsley"') .facet("author") .facet_counts() ) self.assertEqual(counts["fields"]["author"], [("Daniel Lindsley", 3)]) def test_narrow(self): self.sb.update(self.smmi, self.sample_objs) counts = ( SearchQuerySet("elasticsearch") .facet("author") .facet("editor") .narrow('editor_exact:"Perry White"') .facet_counts() ) self.assertEqual( 
counts["fields"]["author"], [("Daniel Lindsley", 3), ("Dan Watson", 2)] ) self.assertEqual(counts["fields"]["editor"], [("Perry White", 5)]) def test_date_facet(self): self.sb.update(self.smmi, self.sample_objs) start = datetime.date(2013, 9, 1) end = datetime.date(2013, 9, 30) # Facet by day counts = ( SearchQuerySet("elasticsearch") .date_facet("pub_date", start_date=start, end_date=end, gap_by="day") .facet_counts() ) self.assertEqual( counts["dates"]["pub_date"], [ (datetime.datetime(2013, 9, 1), 2), (datetime.datetime(2013, 9, 2), 3), (datetime.datetime(2013, 9, 3), 2), (datetime.datetime(2013, 9, 4), 2), ], ) # By month counts = ( SearchQuerySet("elasticsearch") .date_facet("pub_date", start_date=start, end_date=end, gap_by="month") .facet_counts() ) self.assertEqual( counts["dates"]["pub_date"], [(datetime.datetime(2013, 9, 1), 9)] )
Elasticsearch7FacetingTestCase
python
readthedocs__readthedocs.org
readthedocs/storage/__init__.py
{ "start": 864, "end": 1013 }
class ____(LazyObject): def _setup(self): self._wrapped = get_storage_class(settings.RTD_BUILD_MEDIA_STORAGE)()
ConfiguredBuildMediaStorage
python
great-expectations__great_expectations
great_expectations/data_context/types/base.py
{ "start": 21211, "end": 35487 }
class ____(AbstractConfigSchema): class Meta: unknown = INCLUDE name = fields.String( required=False, allow_none=True, ) id = fields.String( required=False, allow_none=True, ) class_name = fields.String( required=True, allow_none=False, ) module_name = fields.String( required=False, allow_none=True, missing="great_expectations.datasource.data_connector", ) assets = fields.Dict( keys=fields.Str(), values=fields.Nested(AssetConfigSchema, required=False, allow_none=True), required=False, allow_none=True, ) base_directory = fields.String(required=False, allow_none=True) glob_directive = fields.String(required=False, allow_none=True) default_regex = fields.Dict(required=False, allow_none=True) credentials = fields.Raw(required=False, allow_none=True) batch_identifiers = fields.List(cls_or_instance=fields.Str(), required=False, allow_none=True) # S3 boto3_options = fields.Dict( keys=fields.Str(), values=fields.Str(), required=False, allow_none=True ) bucket = fields.String(required=False, allow_none=True) max_keys = fields.Integer(required=False, allow_none=True) # Azure azure_options = fields.Dict( keys=fields.Str(), values=fields.Str(), required=False, allow_none=True ) container = fields.String(required=False, allow_none=True) name_starts_with = fields.String(required=False, allow_none=True) # GCS gcs_options = fields.Dict( keys=fields.Str(), values=fields.Str(), required=False, allow_none=True ) bucket_or_name = fields.String(required=False, allow_none=True) max_results = fields.String(required=False, allow_none=True) # Both S3/GCS prefix = fields.String(required=False, allow_none=True) # Both S3/Azure delimiter = fields.String(required=False, allow_none=True) data_asset_name_prefix = fields.String(required=False, allow_none=True) data_asset_name_suffix = fields.String(required=False, allow_none=True) include_schema_name = fields.Boolean(required=False, allow_none=True) partitioner_method = fields.String(required=False, allow_none=True) partitioner_kwargs = 
fields.Dict(required=False, allow_none=True) sorters = fields.List( cls_or_instance=fields.Nested(SorterConfigSchema, required=False, allow_none=True), required=False, allow_none=True, ) sampling_method = fields.String(required=False, allow_none=True) sampling_kwargs = fields.Dict(required=False, allow_none=True) excluded_tables = fields.List(cls_or_instance=fields.Str(), required=False, allow_none=True) included_tables = fields.List(cls_or_instance=fields.Str(), required=False, allow_none=True) skip_inapplicable_tables = fields.Boolean(required=False, allow_none=True) introspection_directives = fields.Dict(required=False, allow_none=True) batch_spec_passthrough = fields.Dict(required=False, allow_none=True) # AWS Glue Data Catalog glue_introspection_directives = fields.Dict(required=False, allow_none=True) catalog_id = fields.String(required=False, allow_none=True) partitions = fields.List(cls_or_instance=fields.Str(), required=False, allow_none=True) # noinspection PyUnusedLocal @validates_schema def validate_schema(self, data, **kwargs): # noqa: C901, PLR0912 # FIXME CoP # If a class_name begins with the dollar sign ("$"), then it is assumed to be a variable name to be substituted. # noqa: E501 # FIXME CoP if data["class_name"][0] == "$": return if ("default_regex" in data) and not ( data["class_name"] # noqa: E713 # membership check in [ "InferredAssetFilesystemDataConnector", "ConfiguredAssetFilesystemDataConnector", "InferredAssetS3DataConnector", "ConfiguredAssetS3DataConnector", "InferredAssetAzureDataConnector", "ConfiguredAssetAzureDataConnector", "InferredAssetGCSDataConnector", "ConfiguredAssetGCSDataConnector", "InferredAssetDBFSDataConnector", "ConfiguredAssetDBFSDataConnector", ] ): raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP f"""Your current configuration uses one or more keys in a data connector that are required only by a subclass of the FilePathDataConnector class (your data connector is "{data["class_name"]}"). 
Please update your configuration to continue. """ # noqa: E501 # FIXME CoP ) if ("glob_directive" in data) and not ( data["class_name"] # noqa: E713 # membership check in [ "InferredAssetFilesystemDataConnector", "ConfiguredAssetFilesystemDataConnector", "InferredAssetDBFSDataConnector", "ConfiguredAssetDBFSDataConnector", ] ): raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP f"""Your current configuration uses one or more keys in a data connector that are required only by a filesystem type of the data connector (your data connector is "{data["class_name"]}"). Please update your configuration to continue. """ # noqa: E501 # FIXME CoP ) if ("delimiter" in data) and not ( data["class_name"] # noqa: E713 # membership check in [ "InferredAssetS3DataConnector", "ConfiguredAssetS3DataConnector", "InferredAssetAzureDataConnector", "ConfiguredAssetAzureDataConnector", "InferredAssetGCSDataConnector", "ConfiguredAssetGCSDataConnector", ] ): raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP f"""Your current configuration uses one or more keys in a data connector that are required only by an S3/Azure/GCS type of the data connector (your data connector is "{data["class_name"]}"). Please update your configuration \ to continue. """ # noqa: E501 # FIXME CoP ) if ("prefix" in data) and not ( data["class_name"] # noqa: E713 # membership check in [ "InferredAssetS3DataConnector", "ConfiguredAssetS3DataConnector", "InferredAssetGCSDataConnector", "ConfiguredAssetGCSDataConnector", ] ): raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP f"""Your current configuration uses one or more keys in a data connector that are required only by an S3/GCS type of the data connector (your data connector is "{data["class_name"]}"). Please update your configuration to continue. 
""" # noqa: E501 # FIXME CoP ) if ("bucket" in data or "max_keys" in data) and not ( data["class_name"] # noqa: E713 # membership check in [ "InferredAssetS3DataConnector", "ConfiguredAssetS3DataConnector", ] ): raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP f"""Your current configuration uses one or more keys in a data connector that are required only by an S3 type of the data connector (your data connector is "{data["class_name"]}"). Please update your configuration to continue. """ # noqa: E501 # FIXME CoP ) if ("azure_options" in data or "container" in data or "name_starts_with" in data) and not ( data["class_name"] # noqa: E713 # membership check in [ "InferredAssetAzureDataConnector", "ConfiguredAssetAzureDataConnector", ] ): raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP f"""Your current configuration uses one or more keys in a data connector that are required only by an Azure type of the data connector (your data connector is "{data["class_name"]}"). Please update your configuration to continue. """ # noqa: E501 # FIXME CoP ) if "azure_options" in data and data["class_name"] in [ "InferredAssetAzureDataConnector", "ConfiguredAssetAzureDataConnector", ]: azure_options = data["azure_options"] if not (("conn_str" in azure_options) ^ ("account_url" in azure_options)): raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP """Your current configuration is either missing methods of authentication or is using too many for \ the Azure type of data connector. You must only select one between `conn_str` or `account_url`. Please update your \ configuration to continue. 
""" # noqa: E501 # FIXME CoP ) if ("gcs_options" in data or "bucket_or_name" in data or "max_results" in data) and not ( data["class_name"] # noqa: E713 # membership check in [ "InferredAssetGCSDataConnector", "ConfiguredAssetGCSDataConnector", ] ): raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP f"""Your current configuration uses one or more keys in a data connector that are required only by a GCS type of the data connector (your data connector is "{data["class_name"]}"). Please update your configuration to continue. """ # noqa: E501 # FIXME CoP ) if "gcs_options" in data and data["class_name"] in [ "InferredAssetGCSDataConnector", "ConfiguredAssetGCSDataConnector", ]: gcs_options = data["gcs_options"] if "filename" in gcs_options and "info" in gcs_options: raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP """Your current configuration can only use a single method of authentication for the GCS type of \ data connector. You must only select one between `filename` (from_service_account_file) and `info` \ (from_service_account_info). Please update your configuration to continue. """ # noqa: E501 # FIXME CoP ) if ( "include_schema_name" in data or "partitioner_method" in data or "partitioner_kwargs" in data or "sampling_method" in data or "sampling_kwargs" in data or "skip_inapplicable_tables" in data ) and not ( data["class_name"] # noqa: E713 # membership check in [ "InferredAssetSqlDataConnector", "ConfiguredAssetSqlDataConnector", ] ): raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP f"""Your current configuration uses one or more keys in a data connector that are required only by an SQL type of the data connector (your data connector is "{data["class_name"]}"). Please update your configuration to continue. 
""" # noqa: E501 # FIXME CoP ) if ( "data_asset_name_prefix" in data or "data_asset_name_suffix" in data or "excluded_tables" in data or "included_tables" in data ) and not ( data["class_name"] # noqa: E713 # membership check in [ "InferredAssetSqlDataConnector", "ConfiguredAssetSqlDataConnector", "InferredAssetAWSGlueDataCatalogDataConnector", "ConfiguredAssetAWSGlueDataCatalogDataConnector", ] ): raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP f"""Your current configuration uses one or more keys in a data connector that are required only by an SQL/GlueCatalog type of the data connector (your data connector is "{data["class_name"]}"). Please update your configuration to continue. """ # noqa: E501 # FIXME CoP ) if ( "partitions" in data or "catalog_id" in data or "glue_introspection_directives" in data ) and not ( data["class_name"] # noqa: E713 # membership check in [ "InferredAssetAWSGlueDataCatalogDataConnector", "ConfiguredAssetAWSGlueDataCatalogDataConnector", ] ): raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP f"""Your current configuration uses one or more keys in a data connector that are required only by an GlueCatalog type of the data connector (your data connector is "{data["class_name"]}"). Please update your configuration to continue. 
""" # noqa: E501 # FIXME CoP ) # noinspection PyUnusedLocal @post_load def make_data_connector_config(self, data, **kwargs): return DataConnectorConfig(**data) @pre_dump def prepare_dump(self, data, **kwargs): """ Schemas in Spark Dataframes are defined as StructType, which is not serializable This method calls the schema's jsonValue() method, which translates the object into a json """ # check whether spark exists try: if (not pyspark.types) or (pyspark.types.StructType is None): return data except AttributeError: return data batch_spec_passthrough_config = data.get("batch_spec_passthrough") if batch_spec_passthrough_config: reader_options: dict = batch_spec_passthrough_config.get("reader_options") if reader_options: schema = reader_options.get("schema") if schema and pyspark.types and isinstance(schema, pyspark.types.StructType): data["batch_spec_passthrough"]["reader_options"]["schema"] = schema.jsonValue() return data
DataConnectorConfigSchema
python
pytorch__pytorch
test/inductor/test_custom_post_grad_passes.py
{ "start": 2079, "end": 2338 }
class ____(CustomGraphPass): def __init__(self) -> None: super().__init__() def __call__(self, g: torch.fx.graph.Graph): change_cos_pass(g) def uuid(self) -> bytes: return get_hash_for_files((__file__,))
ChangeCosCustomPass
python
Netflix__metaflow
metaflow/client/core.py
{ "start": 36254, "end": 37453 }
class ____(object): """ Container of data artifacts produced by a `Task`. This object is instantiated through `Task.data`. `MetaflowData` allows results to be retrieved by their name through a convenient dot notation: ```python Task(...).data.my_object ``` You can also test the existence of an object ```python if 'my_object' in Task(...).data: print('my_object found') ``` Note that this container relies on the local cache to load all data artifacts. If your `Task` contains a lot of data, a more efficient approach is to load artifacts individually like so ``` Task(...)['my_object'].data ``` """ def __init__(self, artifacts: Iterable[DataArtifact]): self._artifacts = dict((art.id, art) for art in artifacts) def __getattr__(self, name: str): if name not in self._artifacts: raise AttributeError(name) return self._artifacts[name].data def __contains__(self, var): return var in self._artifacts def __str__(self): return "<MetaflowData: %s>" % ", ".join(self._artifacts) def __repr__(self): return str(self)
MetaflowData
python
django__django
tests/admin_views/tests.py
{ "start": 309585, "end": 313991 }
class ____(TestCase): @classmethod def setUpTestData(cls): cls.superuser = User.objects.create_superuser( username="super", password="secret", email="super@example.com" ) def setUp(self): self.client.force_login(self.superuser) def test_limit_choices_to(self): """Regression test for 14880""" actor = Actor.objects.create(name="Palin", age=27) Inquisition.objects.create(expected=True, leader=actor, country="England") Inquisition.objects.create(expected=False, leader=actor, country="Spain") response = self.client.get(reverse("admin:admin_views_sketch_add")) # Find the link m = re.search( rb'<a href="([^"]*)"[^>]* id="lookup_id_inquisition"', response.content ) self.assertTrue(m) # Got a match popup_url = m[1].decode().replace("&amp;", "&") # Handle relative links popup_url = urljoin(response.request["PATH_INFO"], popup_url) # Get the popup and verify the correct objects show up in the resulting # page. This step also tests integers, strings and booleans in the # lookup query string; in model we define inquisition field to have a # limit_choices_to option that includes a filter on a string field # (inquisition__actor__name), a filter on an integer field # (inquisition__actor__age), and a filter on a boolean field # (inquisition__expected). response2 = self.client.get(popup_url) self.assertContains(response2, "Spain") self.assertNotContains(response2, "England") def test_limit_choices_to_isnull_false(self): """Regression test for 20182""" Actor.objects.create(name="Palin", age=27) Actor.objects.create(name="Kilbraken", age=50, title="Judge") response = self.client.get(reverse("admin:admin_views_sketch_add")) # Find the link m = re.search( rb'<a href="([^"]*)"[^>]* id="lookup_id_defendant0"', response.content ) self.assertTrue(m) # Got a match popup_url = m[1].decode().replace("&amp;", "&") # Handle relative links popup_url = urljoin(response.request["PATH_INFO"], popup_url) # Get the popup and verify the correct objects show up in the resulting # page. 
This step tests field__isnull=0 gets parsed correctly from the # lookup query string; in model we define defendant0 field to have a # limit_choices_to option that includes "actor__title__isnull=False". response2 = self.client.get(popup_url) self.assertContains(response2, "Kilbraken") self.assertNotContains(response2, "Palin") def test_limit_choices_to_isnull_true(self): """Regression test for 20182""" Actor.objects.create(name="Palin", age=27) Actor.objects.create(name="Kilbraken", age=50, title="Judge") response = self.client.get(reverse("admin:admin_views_sketch_add")) # Find the link m = re.search( rb'<a href="([^"]*)"[^>]* id="lookup_id_defendant1"', response.content ) self.assertTrue(m) # Got a match popup_url = m[1].decode().replace("&amp;", "&") # Handle relative links popup_url = urljoin(response.request["PATH_INFO"], popup_url) # Get the popup and verify the correct objects show up in the resulting # page. This step tests field__isnull=1 gets parsed correctly from the # lookup query string; in model we define defendant1 field to have a # limit_choices_to option that includes "actor__title__isnull=True". response2 = self.client.get(popup_url) self.assertNotContains(response2, "Kilbraken") self.assertContains(response2, "Palin") def test_list_display_method_same_name_as_reverse_accessor(self): """ Should be able to use a ModelAdmin method in list_display that has the same name as a reverse model field ("sketch" in this case). """ actor = Actor.objects.create(name="Palin", age=27) Inquisition.objects.create(expected=True, leader=actor, country="England") response = self.client.get(reverse("admin:admin_views_inquisition_changelist")) self.assertContains(response, "list-display-sketch") @override_settings(ROOT_URLCONF="admin_views.urls")
RawIdFieldsTest
python
plotly__plotly.py
plotly/graph_objs/layout/newshape/_line.py
{ "start": 235, "end": 4451 }
class ____(_BaseLayoutHierarchyType): _parent_path_str = "layout.newshape" _path_str = "layout.newshape.line" _valid_props = {"color", "dash", "width"} @property def color(self): """ Sets the line color. By default uses either dark grey or white to increase contrast with background color. The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def dash(self): """ Sets the dash style of lines. Set to a dash type string ("solid", "dot", "dash", "longdash", "dashdot", or "longdashdot") or a dash length list in px (eg "5px,10px,2px,2px"). The 'dash' property is an enumeration that may be specified as: - One of the following dash styles: ['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot'] - A string containing a dash length list in pixels or percentages (e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.) Returns ------- str """ return self["dash"] @dash.setter def dash(self, val): self["dash"] = val @property def width(self): """ Sets the line width (in px). The 'width' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["width"] @width.setter def width(self, val): self["width"] = val @property def _prop_descriptions(self): return """\ color Sets the line color. By default uses either dark grey or white to increase contrast with background color. dash Sets the dash style of lines. Set to a dash type string ("solid", "dot", "dash", "longdash", "dashdot", or "longdashdot") or a dash length list in px (eg "5px,10px,2px,2px"). width Sets the line width (in px). 
""" def __init__(self, arg=None, color=None, dash=None, width=None, **kwargs): """ Construct a new Line object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.layout.newshape.Line` color Sets the line color. By default uses either dark grey or white to increase contrast with background color. dash Sets the dash style of lines. Set to a dash type string ("solid", "dot", "dash", "longdash", "dashdot", or "longdashdot") or a dash length list in px (eg "5px,10px,2px,2px"). width Sets the line width (in px). Returns ------- Line """ super().__init__("line") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.layout.newshape.Line constructor must be a dict or an instance of :class:`plotly.graph_objs.layout.newshape.Line`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("color", arg, color) self._set_property("dash", arg, dash) self._set_property("width", arg, width) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Line
python
pytorch__pytorch
test/distributed/pipelining/test_schedule.py
{ "start": 37366, "end": 38715 }
class ____(TestCase): def test_valid_schedule(self): schedule_actions = [ { 0: [_Action(0, F, 0), _Action(0, B, 0)], 1: [_Action(1, F, 0), _Action(1, B, 0)], }, { 0: [_Action(0, F, 0), _Action(0, I, 0), _Action(0, W, 0)], 1: [_Action(1, F, 0), _Action(1, I, 0), _Action(1, W, 0)], }, ] pp_group_size = 2 num_stages = 2 num_microbatches = 1 for actions in schedule_actions: _validate_schedule(actions, pp_group_size, num_stages, num_microbatches) def test_invalid_schedule_missing_rank(self): actions = { 0: [_Action(0, F, 0), _Action(0, B, 0)], } pp_group_size = 2 num_stages = 2 num_microbatches = 1 with self.assertRaises(AssertionError): _validate_schedule(actions, pp_group_size, num_stages, num_microbatches) def test_invalid_schedule_missing_action(self): actions = { 0: [_Action(0, F, 0)], 1: [_Action(1, F, 0)], } pp_group_size = 2 num_stages = 2 num_microbatches = 1 with self.assertRaises(AssertionError): _validate_schedule(actions, pp_group_size, num_stages, num_microbatches)
TestValidateSchedule
python
hyperopt__hyperopt
hyperopt/base.py
{ "start": 25111, "end": 34756 }
class ____: """Picklable representation of search space and evaluation function.""" rec_eval_print_node_on_error = False # -- the Ctrl object is not used directly, but rather # a live Ctrl instance is inserted for the pyll_ctrl # in self.evaluate so that it can be accessed from within # the pyll graph describing the search space. pyll_ctrl = pyll.as_apply(Ctrl) def __init__( self, fn, expr, workdir=None, pass_expr_memo_ctrl=None, name=None, loss_target=None, ): """ Parameters ---------- fn : callable This stores the `fn` argument to `fmin`. (See `hyperopt.fmin.fmin`) expr : hyperopt.pyll.Apply This is the `space` argument to `fmin`. (See `hyperopt.fmin.fmin`) workdir : string (or None) If non-None, the current working directory will be `workdir`while `expr` and `fn` are evaluated. (XXX Currently only respected by jobs run via MongoWorker) pass_expr_memo_ctrl : bool If True, `fn` will be called like this: `fn(self.expr, memo, ctrl)`, where `memo` is a dictionary mapping `Apply` nodes to their computed values, and `ctrl` is a `Ctrl` instance for communicating with a Trials database. This lower-level calling convention is useful if you want to call e.g. `hyperopt.pyll.rec_eval` yourself in some customized way. name : string (or None) Label, used for pretty-printing. loss_target : float (or None) The actual or estimated minimum of `fn`. Some optimization algorithms may behave differently if their first objective is to find an input that achieves a certain value, rather than the more open-ended objective of pure minimization. XXX: Move this from Domain to be an fmin arg. 
""" self.fn = fn if pass_expr_memo_ctrl is None: self.pass_expr_memo_ctrl = getattr(fn, "fmin_pass_expr_memo_ctrl", False) else: self.pass_expr_memo_ctrl = pass_expr_memo_ctrl self.expr = pyll.as_apply(expr) self.params = {} for node in pyll.dfs(self.expr): if node.name == "hyperopt_param": label = node.arg["label"].obj if label in self.params: raise DuplicateLabel(label) self.params[label] = node.arg["obj"] self.loss_target = loss_target self.name = name self.workdir = workdir self.s_new_ids = pyll.Literal("new_ids") # -- list at eval-time before = pyll.dfs(self.expr) # -- raises exception if expr contains cycles pyll.toposort(self.expr) vh = self.vh = VectorizeHelper(self.expr, self.s_new_ids) # -- raises exception if v_expr contains cycles pyll.toposort(vh.v_expr) idxs_by_label = vh.idxs_by_label() vals_by_label = vh.vals_by_label() after = pyll.dfs(self.expr) # -- try to detect if VectorizeHelper screwed up anything inplace assert before == after assert set(idxs_by_label.keys()) == set(vals_by_label.keys()) assert set(idxs_by_label.keys()) == set(self.params.keys()) self.s_rng = pyll.Literal("rng-placeholder") # -- N.B. operates inplace: self.s_idxs_vals = recursive_set_rng_kwarg( pyll.scope.pos_args(idxs_by_label, vals_by_label), self.s_rng ) # -- raises an exception if no topological ordering exists pyll.toposort(self.s_idxs_vals) # -- Protocol for serialization. # self.cmd indicates to e.g. MongoWorker how this domain # should be [un]serialized. # XXX This mechanism deserves review as support for ipython # workers improves. 
self.cmd = ("domain_attachment", "FMinIter_Domain") def memo_from_config(self, config): memo = {} for node in pyll.dfs(self.expr): if node.name == "hyperopt_param": label = node.arg["label"].obj # -- hack because it's not really garbagecollected # this does have the desired effect of crashing the # function if rec_eval actually needs a value that # the the optimization algorithm thought to be unnecessary memo[node] = config.get(label, pyll.base.GarbageCollected) return memo def evaluate(self, config, ctrl, attach_attachments=True): memo = self.memo_from_config(config) use_obj_for_literal_in_memo(self.expr, ctrl, Ctrl, memo) if self.pass_expr_memo_ctrl: rval = self.fn(expr=self.expr, memo=memo, ctrl=ctrl) else: # -- the "work" of evaluating `config` can be written # either into the pyll part (self.expr) # or the normal Python part (self.fn) pyll_rval = pyll.rec_eval( self.expr, memo=memo, print_node_on_error=self.rec_eval_print_node_on_error, ) rval = self.fn(pyll_rval) if isinstance(rval, (float, int, np.number)): dict_rval = {"loss": float(rval), "status": STATUS_OK} else: dict_rval = dict(rval) status = dict_rval["status"] if status not in STATUS_STRINGS: raise InvalidResultStatus(dict_rval) if status == STATUS_OK: # -- make sure that the loss is present and valid try: dict_rval["loss"] = float(dict_rval["loss"]) except (TypeError, KeyError): raise InvalidLoss(dict_rval) if attach_attachments: attachments = dict_rval.pop("attachments", {}) for key, val in list(attachments.items()): ctrl.attachments[key] = val # -- don't do this here because SON-compatibility is only a requirement # for trials destined for a mongodb. In-memory rvals can contain # anything. return dict_rval def evaluate_async(self, config, ctrl, attach_attachments=True): """ this is the first part of async evaluation for ipython parallel engines (see ipy.py) This breaks evaluate into two parts to allow for the apply_async call to only pass the objective function and arguments. 
""" memo = self.memo_from_config(config) use_obj_for_literal_in_memo(self.expr, ctrl, Ctrl, memo) if self.pass_expr_memo_ctrl: pyll_rval = self.fn(expr=self.expr, memo=memo, ctrl=ctrl) else: # -- the "work" of evaluating `config` can be written # either into the pyll part (self.expr) # or the normal Python part (self.fn) pyll_rval = pyll.rec_eval( self.expr, memo=memo, print_node_on_error=self.rec_eval_print_node_on_error, ) return (self.fn, pyll_rval) def evaluate_async2(self, rval, ctrl, attach_attachments=True): """ this is the second part of async evaluation for ipython parallel engines (see ipy.py) """ if isinstance(rval, (float, int, np.number)): dict_rval = {"loss": float(rval), "status": STATUS_OK} else: dict_rval = dict(rval) status = dict_rval["status"] if status not in STATUS_STRINGS: raise InvalidResultStatus(dict_rval) if status == STATUS_OK: # -- make sure that the loss is present and valid try: dict_rval["loss"] = float(dict_rval["loss"]) except (TypeError, KeyError): raise InvalidLoss(dict_rval) if attach_attachments: attachments = dict_rval.pop("attachments", {}) for key, val in list(attachments.items()): ctrl.attachments[key] = val # -- don't do this here because SON-compatibility is only a requirement # for trials destined for a mongodb. In-memory rvals can contain # anything. return dict_rval def short_str(self): return "Domain{%s}" % str(self.fn) def loss(self, result, config=None): """Extract the scalar-valued loss from a result document""" return result.get("loss", None) def loss_variance(self, result, config=None): """Return the variance in the estimate of the loss""" return result.get("loss_variance", 0.0) def true_loss(self, result, config=None): """Return a true loss, in the case that the `loss` is a surrogate""" # N.B. 
don't use get() here, it evaluates self.loss un-necessarily try: return result["true_loss"] except KeyError: return self.loss(result, config=config) def true_loss_variance(self, config=None): """Return the variance in true loss, in the case that the `loss` is a surrogate. """ raise NotImplementedError() def status(self, result, config=None): """Extract the job status from a result document""" return result["status"] def new_result(self): """Return a JSON-encodable object to serve as the 'result' for new jobs. """ return {"status": STATUS_NEW} # -- flake8 doesn't like blank last line
Domain
python
huggingface__transformers
src/transformers/models/falcon/modeling_falcon.py
{ "start": 45797, "end": 50482 }
class ____(FalconPreTrainedModel, GenerationMixin): _tied_weights_keys = {"lm_head.weight": "transformer.word_embeddings.weight"} def __init__(self, config: FalconConfig): super().__init__(config) self.transformer = FalconModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def set_output_embeddings(self, new_embeddings: torch.Tensor): self.lm_head = new_embeddings @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs, ) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) hidden_states = transformer_outputs[0] slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep lm_logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function( lm_logits, labels, vocab_size=self.config.vocab_size, **kwargs, ) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @auto_docstring( custom_intro=""" The Falcon Model transformer with a sequence classification head on top (linear layer). [`FalconForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """ )
FalconForCausalLM
python
pennersr__django-allauth
tests/apps/account/test_models.py
{ "start": 145, "end": 1146 }
class ____(AbstractUser): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) class Meta(AbstractUser.Meta): # type: ignore[name-defined] swappable = "AUTH_USER_MODEL" app_label = "dummy" def test_add_new_email(rf, user, settings): settings.ACCOUNT_CHANGE_EMAIL = True request = rf.get("/") assert EmailAddress.objects.filter(user=user).count() == 1 new_email = EmailAddress.objects.add_new_email( request, user, "new@email.org", send_verification=False ) assert not new_email.verified assert not new_email.primary assert EmailAddress.objects.filter(user=user).count() == 2 EmailAddress.objects.add_new_email( request, user, "new2@email.org", send_verification=False ) assert EmailAddress.objects.filter(user=user).count() == 2 assert not EmailAddress.objects.filter(pk=new_email.pk).exists() assert EmailAddress.objects.filter(email="new2@email.org", verified=False).exists()
UUIDUser
python
getsentry__sentry
src/sentry/integrations/jira/client.py
{ "start": 814, "end": 8944 }
class ____(ApiClient): # TODO: Update to v3 endpoints COMMENTS_URL = "/rest/api/2/issue/%s/comment" COMMENT_URL = "/rest/api/2/issue/%s/comment/%s" STATUS_URL = "/rest/api/2/status" CREATE_URL = "/rest/api/2/issue" ISSUE_URL = "/rest/api/2/issue/%s" META_URL = "/rest/api/2/issue/createmeta" PRIORITIES_URL = "/rest/api/2/priority" PROJECTS_PAGINATED_URL = "/rest/api/2/project/search" PROJECT_URL = "/rest/api/2/project" SEARCH_URL = "/rest/api/2/search/jql/" VERSIONS_URL = "/rest/api/2/project/%s/versions" USERS_URL = "/rest/api/2/user/assignable/search" USER_URL = "/rest/api/2/user" SERVER_INFO_URL = "/rest/api/2/serverInfo" STATUS_SEARCH_URL = "/rest/api/2/statuses/search" ASSIGN_URL = "/rest/api/2/issue/%s/assignee" TRANSITION_URL = "/rest/api/2/issue/%s/transitions" EMAIL_URL = "/rest/api/3/user/email" AUTOCOMPLETE_URL = "/rest/api/2/jql/autocompletedata/suggestions" PROPERTIES_URL = "/rest/api/3/issue/%s/properties/%s" integration_name = IntegrationProviderSlug.JIRA.value # This timeout is completely arbitrary. Jira doesn't give us any # caching headers to work with. Ideally we want a duration that # lets the user make their second jira issue with cached data. 
cache_time = 240 def __init__( self, integration: RpcIntegration | Integration, verify_ssl: bool, logging_context: Any | None = None, ): self.base_url = integration.metadata["base_url"] self.shared_secret = integration.metadata["shared_secret"] super().__init__( integration_id=integration.id, verify_ssl=verify_ssl, logging_context=logging_context, ) def finalize_request(self, prepared_request: PreparedRequest): assert prepared_request.url is not None assert prepared_request.method is not None path = prepared_request.url[len(self.base_url) :] url_params = dict(parse_qs(urlsplit(path).query)) path = path.split("?")[0] jwt_payload = { "iss": JIRA_KEY, "iat": datetime.datetime.utcnow(), "exp": datetime.datetime.utcnow() + datetime.timedelta(seconds=5 * 60), "qsh": get_query_hash( uri=path, method=prepared_request.method.upper(), query_params=url_params, ), } encoded_jwt = jwt.encode(jwt_payload, self.shared_secret) prepared_request.headers["Authorization"] = f"JWT {encoded_jwt}" return prepared_request def get_cache_prefix(self) -> str: return "sentry-jira-2:" def user_id_get_param(self): """ Jira-Cloud requires GDPR compliant API usage so we have to use accountId """ return "accountId" def user_id_field(self): """ Jira-Cloud requires GDPR compliant API usage so we have to use accountId """ return "accountId" def user_query_param(self): """ Jira-Cloud requires GDPR compliant API usage so we have to use query """ return "query" def get_issue(self, issue_id): return self.get(self.ISSUE_URL % (issue_id,)) def search_issues(self, query): q = query.replace('"', '\\"') # check if it looks like an issue id if ISSUE_KEY_RE.match(query): jql = f'id="{q}"' else: jql = f'text ~ "{q}"' return self.get(self.SEARCH_URL, params={"jql": jql, "fields": "*all"}) def create_comment(self, issue_key, comment): return self.post(self.COMMENTS_URL % issue_key, data={"body": comment}) def update_comment(self, issue_key, comment_id, comment): return self.put(self.COMMENT_URL % (issue_key, 
comment_id), data={"body": comment}) def get_projects_paginated(self, params: dict[str, Any] | None = None): response = self.get(self.PROJECTS_PAGINATED_URL, params=params) return response def get_projects_list(self): """deprecated - please use paginated projects endpoint""" return self.get_cached(self.PROJECT_URL) def get_project_key_for_id(self, project_id) -> str: if not project_id: return "" projects = self.get_projects_list() for project in projects: if project["id"] == project_id: return project["key"] return "" def get_create_meta_for_project(self, project): params = {"expand": "projects.issuetypes.fields", "projectIds": project} metas = self.get_cached(self.META_URL, params=params) # We saw an empty JSON response come back from the API :( if not metas: logger.info( "jira.get-create-meta.empty-response", extra={"base_url": self.base_url, "project": project}, ) return None # XXX(dcramer): document how this is possible, if it even is if len(metas["projects"]) > 1: raise ApiError(f"More than one project found matching {project}.") try: return metas["projects"][0] except IndexError: logger.info( "jira.get-create-meta.key-error", extra={"base_url": self.base_url, "project": project}, ) return None def get_versions(self, project): return self.get_cached(self.VERSIONS_URL % project) def get_priorities(self): return self.get_cached(self.PRIORITIES_URL) def search_users_for_project(self, project, username): # Jira Server wants a project key, while cloud is indifferent. 
project_key = self.get_project_key_for_id(project) return self.get_cached( self.USERS_URL, params={"project": project_key, self.user_query_param(): username} ) def search_users_for_issue(self, issue_key, email): return self.get_cached( self.USERS_URL, params={"issueKey": issue_key, self.user_query_param(): email} ) def get_user(self, user_id): user_id_get_param = self.user_id_get_param() return self.get_cached(self.USER_URL, params={user_id_get_param: user_id}) def create_issue(self, raw_form_data): data = {"fields": raw_form_data} return self.post(self.CREATE_URL, data=data) def get_server_info(self): return self.get(self.SERVER_INFO_URL) def get_valid_statuses(self): return self.get_cached(self.STATUS_URL) def get_transitions(self, issue_key): return self.get_cached(self.TRANSITION_URL % issue_key)["transitions"] def transition_issue(self, issue_key, transition_id): return self.post( self.TRANSITION_URL % issue_key, data={"transition": {"id": transition_id}} ) def assign_issue(self, key, name_or_account_id): user_id_field = self.user_id_field() return self.put(self.ASSIGN_URL % key, data={user_id_field: name_or_account_id}) def set_issue_property(self, issue_key, badge_num): module_key = "sentry-issues-glance" properties_key = f"com.atlassian.jira.issue:{JIRA_KEY}:{module_key}:status" data = {"type": "badge", "value": {"label": badge_num}} return self.put(self.PROPERTIES_URL % (issue_key, properties_key), data=data) def get_email(self, account_id): user = self.get_cached(self.EMAIL_URL, params={"accountId": account_id}) return user.get("email") def get_field_autocomplete(self, name, value): if name.startswith(CUSTOMFIELD_PREFIX): # Transform `customfield_0123` into `cf[0123]` cf_id = name[len(CUSTOMFIELD_PREFIX) :] jql_name = f"cf[{cf_id}]" else: jql_name = name return self.get_cached( self.AUTOCOMPLETE_URL, params={"fieldName": jql_name, "fieldValue": value} ) def get_project_statuses(self, project_id: str) -> dict[str, Any]: return 
dict(self.get_cached(self.STATUS_SEARCH_URL, params={"projectId": project_id}))
JiraCloudClient
python
numba__numba
numba/tests/test_mandelbrot.py
{ "start": 262, "end": 595 }
class ____(unittest.TestCase): def test_mandelbrot(self): pyfunc = is_in_mandelbrot cfunc = njit((types.complex64,))(pyfunc) points = [0+0j, 1+0j, 0+1j, 1+1j, 0.1+0.1j] for p in points: self.assertEqual(cfunc(p), pyfunc(p)) if __name__ == '__main__': unittest.main()
TestMandelbrot
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typedDict18.py
{ "start": 1650, "end": 1858 }
class ____(TD7[Literal[1]]): ... def func6(a: TD8) -> Literal[1]: return a["x"] func6({"x": 1, "y": 1, "z": "a"}) f4: TD8 = {"x": 1, "y": 1, "z": "a"} reveal_type(func6({"x": 1, "y": 1, "z": "a"}))
TD8
python
spyder-ide__spyder
spyder/plugins/remoteclient/api/manager/ssh.py
{ "start": 939, "end": 14358 }
class ____(SpyderRemoteAPIManagerBase): """Class to manage a remote SSH server and its APIs.""" _extra_options = ["platform", "id", "default_kernel_spec"] START_SERVER_COMMAND = ( f"/${{HOME}}/.local/bin/micromamba run -n {SERVER_ENV} spyder-server" ) GET_SERVER_INFO_COMMAND = ( f"/${{HOME}}/.local/bin/micromamba run" f" -n {SERVER_ENV} spyder-server info" ) def __init__(self, conf_id, options: SSHClientOptions, _plugin=None): super().__init__(conf_id, options, _plugin) self._ssh_connection: asyncssh.SSHClientConnection = None self._remote_server_process: asyncssh.SSHClientProcess = None self._port_forwarder: asyncssh.SSHListener = None self._server_info = {} self._local_port = None @property def api_token(self): return self._server_info.get("token") @property def server_port(self): return self._server_info.get("port") @property def server_pid(self): return self._server_info.get("pid") @property def port_is_forwarded(self): """Check if local port is forwarded.""" return self._port_forwarder is not None @property def server_url(self): if not self._local_port: raise ValueError("Local port is not set") return f"http://127.0.0.1:{self._local_port}" @property def peer_host(self): if self._ssh_connection is not None: return self._ssh_connection.get_extra_info("peername")[0] else: return None @property def peer_port(self): if not self.connected: return None return self._ssh_connection.get_extra_info("peername")[1] @property def peer_username(self): if not self.connected: return None return self._ssh_connection.get_extra_info("username") @property def client_factory(self): """Return the client factory.""" return lambda: SpyderSSHClient(self) async def get_server_info(self): """Check if the remote server is running.""" if self._ssh_connection is None: self.logger.debug("ssh connection was not established") return None try: output = await self._ssh_connection.run( self.GET_SERVER_INFO_COMMAND, check=True ) except asyncssh.TimeoutError: self.logger.error("Getting server info 
timed out") return None except asyncssh.misc.ChannelOpenError: self.logger.error( "The connection is closed, so it's not possible to get the " "server info" ) return None except asyncssh.ProcessError as err: self.logger.debug(f"Error getting server info: {err.stderr}") return None try: info = json.loads(output.stdout.splitlines()[-1]) except (json.JSONDecodeError, IndexError): self.logger.debug(f"Issue parsing server info: {output.stdout}") return None return info async def _start_remote_server(self): """Start remote server.""" if not self.connected: self.logger.error("SSH connection is not open") self._emit_connection_status( ConnectionStatus.Error, _("The SSH connection is not open"), ) return False if info := await self.get_server_info(): self.logger.warning( f"Remote server is already running for {self.peer_host}" ) self.logger.debug("Checking server info") if self._server_info != info: self._server_info = info self.logger.info( "Different server info, updating info " f"for {self.peer_host}" ) if await self.forward_local_port(): self._emit_connection_status( ConnectionStatus.Active, _("Spyder remote services are active"), ) return True self.logger.error( "Error forwarding local port, server might not be " "reachable" ) self._emit_connection_status( ConnectionStatus.Error, _("It was not possible to forward the local port"), ) self._emit_connection_status( ConnectionStatus.Active, _("Spyder remote services are active"), ) return True self.logger.debug(f"Starting remote server for {self.peer_host}") try: self._remote_server_process = ( await self._ssh_connection.create_process( self.START_SERVER_COMMAND, stderr=asyncssh.STDOUT, ) ) except (OSError, asyncssh.Error, ValueError) as e: self.logger.error(f"Error starting remote server: {e}") self._remote_server_process = None self._emit_connection_status( ConnectionStatus.Error, _("Error starting the remote server") ) return False _time = 0 while (info := await self.get_server_info()) is None and _time < 5: await 
asyncio.sleep(1) _time += 1 if info is None: self.logger.error("Faield to get server info") self._emit_connection_status( ConnectionStatus.Error, _( "There was an error when trying to get the remote server " "information" ), ) return False self._server_info = info self.logger.info( f"Remote server started for {self.peer_host} at port " f"{self.server_port}" ) if await self.forward_local_port(): self._emit_connection_status( ConnectionStatus.Active, _("Spyder remote services are active"), ) return True self.logger.error("Error forwarding local port.") self._emit_connection_status( ConnectionStatus.Error, _("It was not possible to forward the local port"), ) return False async def ensure_server_installed(self) -> bool: """Check remote server version.""" if not self.connected: self.logger.error("SSH connection is not open") self._emit_connection_status( ConnectionStatus.Error, _("The SSH connection is not open"), ) return False commnad = get_server_version_command(self.options["platform"]) try: output = await self._ssh_connection.run(commnad, check=True) except asyncssh.ProcessError as err: # Server is not installed self.logger.warning(f"Issue checking server version: {err.stderr}") return await self.install_remote_server() version = output.stdout.splitlines()[-1].strip() if Version(version) >= Version(SPYDER_REMOTE_MAX_VERSION): self.logger.error( f"Server version mismatch: {version} is greater than " f"the maximum supported version {SPYDER_REMOTE_MAX_VERSION}" ) self._emit_version_mismatch(version) self._emit_connection_status( status=ConnectionStatus.Error, message=_("Error connecting to the remote server"), ) return False if Version(version) < Version(SPYDER_REMOTE_MIN_VERSION): self.logger.warning( f"Server version mismatch: {version} is lower than " f"the minimum supported version {SPYDER_REMOTE_MIN_VERSION}. " f"A more recent version will be installed." 
) return await self.install_remote_server() self.logger.info(f"Supported Server version: {version}") return True async def _install_remote_server(self): """Install remote server.""" if not self.connected: self.logger.error("SSH connection is not open") self._emit_connection_status( ConnectionStatus.Error, _("The SSH connection is not open"), ) return False self.logger.debug( f"Installing spyder-remote-server on {self.peer_host}" ) try: command = get_installer_command(self.options["platform"]) except NotImplementedError: self.logger.exception( f"Cannot install spyder-remote-server on " f"{self.options['platform']} automatically. Please install it " f"manually." ) self._emit_connection_status( status=ConnectionStatus.Error, message=_("There was an error installing the remote server"), ) return False try: await self._ssh_connection.run(command, check=True) except asyncssh.ProcessError as err: self.logger.exception(f"Installation script failed: {err.stderr}") self._emit_connection_status( status=ConnectionStatus.Error, message=_("There was an error installing the remote server"), ) return False self.logger.info( f"Successfully installed spyder-remote-server on {self.peer_host}" ) return True async def _create_new_connection(self) -> bool: """Create a new SSH connection to the remote server machine. Args ---- options: dict[str, str] The options to use for the SSH connection. Returns ------- bool True if the connection was successful, False otherwise. 
""" connect_kwargs = { k: v for k, v in self.options.items() if k not in self._extra_options } self.logger.debug("Opening SSH connection") self._ssh_connection = await asyncssh.connect( **connect_kwargs, client_factory=self.client_factory ) self.logger.info("SSH connection established for %s", self.peer_host) return True async def forward_local_port(self): """Forward local port.""" if not self.server_port: self.logger.error("Server port is not set") self._emit_connection_status( status=ConnectionStatus.Error, message=_("The server port is not set"), ) return False if not self.connected: self.logger.error("SSH connection is not open") self._emit_connection_status( status=ConnectionStatus.Error, message=_("The SSH connection is not open"), ) return False self.logger.debug( f"Forwarding an free local port to remote port {self.server_port}" ) if self._port_forwarder: self.logger.warning( f"Port forwarder is already open for host {self.peer_host} " f"with local port {self._local_port} and remote port " f"{self.server_port}" ) await self.close_port_forwarder() local_port = self.get_free_port() server_host = self._server_info["hostname"] self._port_forwarder = await self._ssh_connection.forward_local_port( "", local_port, server_host, self.server_port, ) self._local_port = local_port self.logger.debug( f"Forwarded local port {local_port} to remote server at " f"{server_host}:{self.server_port}" ) return True async def close_port_forwarder(self): """Close port forwarder.""" if self.port_is_forwarded: self.logger.debug( f"Closing port forwarder for host {self.peer_host} with local " f"port {self._local_port}" ) self._port_forwarder.close() await self._port_forwarder.wait_closed() self._port_forwarder = None self.logger.debug( f"Port forwarder closed for host {self.peer_host} with local " f"port {self._local_port}" ) async def close_connection(self): """Close SSH connection.""" if not self.connected: self.logger.debug("SSH connection is not open") return await 
self.close_port_forwarder() self.logger.debug(f"Closing SSH connection for {self.peer_host}") self._ssh_connection.close() await self._ssh_connection.wait_closed() self._ssh_connection = None self.logger.info("SSH connection closed") self._reset_connection_established() self._emit_connection_status( ConnectionStatus.Inactive, _("The connection was closed successfully"), )
SpyderRemoteSSHAPIManager
python
django__django
tests/many_to_many/models.py
{ "start": 1436, "end": 1650 }
class ____(models.Model): user = models.ForeignKey(User, models.CASCADE, to_field="username") article = models.ForeignKey(Article, models.CASCADE) # Models to test correct related_name inheritance
UserArticle
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeIs1.py
{ "start": 2078, "end": 4386 }
class ____(Animal): ... T = TypeVar("T") def is_marsupial(val: Animal) -> TypeIs[Kangaroo | Koala]: return isinstance(val, Kangaroo | Koala) # This should generate an error because list[T] isn't consistent with list[T | None]. def has_no_nones(val: list[T | None]) -> TypeIs[list[T]]: return None not in val def takes_int_typeis(f: Callable[[object], TypeIs[int]]) -> None: pass def int_typeis(val: object) -> TypeIs[int]: return isinstance(val, int) def bool_typeis(val: object) -> TypeIs[bool]: return isinstance(val, bool) takes_int_typeis(int_typeis) # This should generate an error because TypeIs is invariant. takes_int_typeis(bool_typeis) def is_two_element_tuple(val: tuple[T, ...]) -> TypeIs[tuple[T, T]]: return len(val) == 2 def func7(names: tuple[str, ...]): if is_two_element_tuple(names): reveal_type(names, expected_text="tuple[str, str]") else: reveal_type(names, expected_text="tuple[str, ...]") def is_int(obj: type) -> TypeIs[type[int]]: ... def func8(x: type) -> None: if is_int(x): reveal_type(x, expected_text="type[int]") def is_int_list(x: Collection[Any]) -> TypeIs[list[int]]: raise NotImplementedError def func9(val: Collection[object]) -> None: if is_int_list(val): reveal_type(val, expected_text="list[int]") else: reveal_type(val, expected_text="Collection[object]") @overload def func10(v: tuple[int | str, ...], b: Literal[False]) -> TypeIs[tuple[str, ...]]: ... @overload def func10( v: tuple[int | str, ...], b: Literal[True] = True ) -> TypeIs[tuple[int, ...]]: ... def func10(v: tuple[int | str, ...], b: bool = True) -> bool: ... 
v0 = is_int(int) v1: bool = v0 v2: int = v0 v3 = v0 & v0 def is_sequence_of_int(sequence: Sequence) -> TypeIs[Sequence[int]]: return all(isinstance(x, int) for x in sequence) def func11(v: Sequence[int] | Sequence[str]): if is_sequence_of_int(v): reveal_type(v, expected_text="Sequence[int]") else: reveal_type(v, expected_text="Sequence[str]") def func12(v: Sequence[int | str] | Sequence[list[Any]]): if is_sequence_of_int(v): reveal_type(v, expected_text="Sequence[int]") else: reveal_type(v, expected_text="Sequence[int | str] | Sequence[list[Any]]")
Koala
python
tornadoweb__tornado
tornado/test/simple_httpclient_test.py
{ "start": 30733, "end": 31308 }
class ____(AsyncHTTPTestCase): def get_app(self): class LargeBody(RequestHandler): def get(self): self.write("a" * 1024 * 100) return Application([("/large", LargeBody)]) def get_http_client(self): # 100KB body with 64KB buffer return SimpleAsyncHTTPClient( max_body_size=1024 * 100, max_buffer_size=1024 * 64 ) def test_large_body(self): response = self.fetch("/large") response.rethrow() self.assertEqual(response.body, b"a" * 1024 * 100)
MaxBufferSizeTest
python
getsentry__sentry
src/sentry/api/serializers/models/exporteddata.py
{ "start": 245, "end": 1743 }
class ____(Serializer): def get_attrs(self, item_list, user, **kwargs): attrs = {} serialized_users = { u["id"]: u for u in user_service.serialize_many( filter=dict(user_ids=[item.user_id for item in item_list]) ) } for item in item_list: if str(item.user_id) in serialized_users: serialized_user = serialized_users[str(item.user_id)] attrs[item] = { "user": { "id": serialized_user["id"], "email": serialized_user["email"], "username": serialized_user["username"], } } else: attrs[item] = {} return attrs def serialize(self, obj, attrs, user, **kwargs): file = obj._get_file() if file is None: checksum = None file_name = None else: checksum = file.checksum file_name = file.name return { "id": obj.id, "user": attrs.get("user"), "dateCreated": obj.date_added, "dateFinished": obj.date_finished, "dateExpired": obj.date_expired, "query": {"type": ExportQueryType.as_str(obj.query_type), "info": obj.query_info}, "status": obj.status, "checksum": checksum, "fileName": file_name, }
ExportedDataSerializer
python
gevent__gevent
src/gevent/libuv/watcher.py
{ "start": 26776, "end": 27906 }
class ____(_base.StatMixin, watcher): _watcher_type = 'fs_poll' _watcher_struct_name = 'gevent_fs_poll_t' _watcher_callback_name = '_gevent_fs_poll_callback3' def _watcher_set_data(self, the_watcher, data): the_watcher.handle.data = data return data def _watcher_ffi_init(self, args): return self._watcher_init(self.loop.ptr, self._watcher) MIN_STAT_INTERVAL = 0.1074891 # match libev; 0.0 is default def _watcher_ffi_start(self): # libev changes this when the watcher is started self._interval = max(self._interval, self.MIN_STAT_INTERVAL) self._watcher_start(self._watcher, self._watcher_callback, self._cpath, int(self._interval * 1000)) @property def _watcher_handle(self): return self._watcher.handle.data @property def attr(self): if not self._watcher.curr.st_nlink: return return self._watcher.curr @property def prev(self): if not self._watcher.prev.st_nlink: return return self._watcher.prev
stat
python
numpy__numpy
numpy/lib/tests/test_function_base.py
{ "start": 96321, "end": 100229 }
class ____: def test_simple(self): [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7]) assert_array_equal(X, np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])) assert_array_equal(Y, np.array([[4, 4, 4], [5, 5, 5], [6, 6, 6], [7, 7, 7]])) def test_single_input(self): [X] = meshgrid([1, 2, 3, 4]) assert_array_equal(X, np.array([1, 2, 3, 4])) def test_no_input(self): args = [] assert_array_equal([], meshgrid(*args)) assert_array_equal([], meshgrid(*args, copy=False)) def test_indexing(self): x = [1, 2, 3] y = [4, 5, 6, 7] [X, Y] = meshgrid(x, y, indexing='ij') assert_array_equal(X, np.array([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]])) assert_array_equal(Y, np.array([[4, 5, 6, 7], [4, 5, 6, 7], [4, 5, 6, 7]])) # Test expected shapes: z = [8, 9] assert_(meshgrid(x, y)[0].shape == (4, 3)) assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4)) assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2)) assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2)) assert_raises(ValueError, meshgrid, x, y, indexing='notvalid') def test_sparse(self): [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True) assert_array_equal(X, np.array([[1, 2, 3]])) assert_array_equal(Y, np.array([[4], [5], [6], [7]])) def test_invalid_arguments(self): # Test that meshgrid complains about invalid arguments # Regression test for issue #4755: # https://github.com/numpy/numpy/issues/4755 assert_raises(TypeError, meshgrid, [1, 2, 3], [4, 5, 6, 7], indices='ij') def test_return_type(self): # Test for appropriate dtype in returned arrays. 
# Regression test for issue #5297 # https://github.com/numpy/numpy/issues/5297 x = np.arange(0, 10, dtype=np.float32) y = np.arange(10, 20, dtype=np.float64) X, Y = np.meshgrid(x, y) assert_(X.dtype == x.dtype) assert_(Y.dtype == y.dtype) # copy X, Y = np.meshgrid(x, y, copy=True) assert_(X.dtype == x.dtype) assert_(Y.dtype == y.dtype) # sparse X, Y = np.meshgrid(x, y, sparse=True) assert_(X.dtype == x.dtype) assert_(Y.dtype == y.dtype) def test_writeback(self): # Issue 8561 X = np.array([1.1, 2.2]) Y = np.array([3.3, 4.4]) x, y = np.meshgrid(X, Y, sparse=False, copy=True) x[0, :] = 0 assert_equal(x[0, :], 0) assert_equal(x[1, :], X) def test_nd_shape(self): a, b, c, d, e = np.meshgrid(*([0] * i for i in range(1, 6))) expected_shape = (2, 1, 3, 4, 5) assert_equal(a.shape, expected_shape) assert_equal(b.shape, expected_shape) assert_equal(c.shape, expected_shape) assert_equal(d.shape, expected_shape) assert_equal(e.shape, expected_shape) def test_nd_values(self): a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5]) assert_equal(a, [[[0, 0, 0]], [[0, 0, 0]]]) assert_equal(b, [[[1, 1, 1]], [[2, 2, 2]]]) assert_equal(c, [[[3, 4, 5]], [[3, 4, 5]]]) def test_nd_indexing(self): a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5], indexing='ij') assert_equal(a, [[[0, 0, 0], [0, 0, 0]]]) assert_equal(b, [[[1, 1, 1], [2, 2, 2]]]) assert_equal(c, [[[3, 4, 5], [3, 4, 5]]])
TestMeshgrid
python
pandas-dev__pandas
pandas/tests/arithmetic/test_numeric.py
{ "start": 2693, "end": 4861 }
class ____: def test_operator_series_comparison_zerorank(self): # GH#13006 result = np.float64(0) > Series([1, 2, 3]) expected = 0.0 > Series([1, 2, 3]) tm.assert_series_equal(result, expected) result = Series([1, 2, 3]) < np.float64(0) expected = Series([1, 2, 3]) < 0.0 tm.assert_series_equal(result, expected) result = np.array([0, 1, 2])[0] > Series([0, 1, 2]) expected = 0.0 > Series([1, 2, 3]) tm.assert_series_equal(result, expected) def test_df_numeric_cmp_dt64_raises(self, box_with_array, fixed_now_ts): # GH#8932, GH#22163 ts = fixed_now_ts obj = np.array(range(5)) obj = tm.box_expected(obj, box_with_array) assert_invalid_comparison(obj, ts, box_with_array) def test_compare_invalid(self): # GH#8058 # ops testing a = Series(np.random.default_rng(2).standard_normal(5), name=0) b = Series(np.random.default_rng(2).standard_normal(5)) b.name = pd.Timestamp("2000-01-01") tm.assert_series_equal(a / b, 1 / (b / a)) def test_numeric_cmp_string_numexpr_path(self, box_with_array, monkeypatch): # GH#36377, GH#35700 box = box_with_array xbox = box if box is not Index else np.ndarray obj = Series(np.random.default_rng(2).standard_normal(51)) obj = tm.box_expected(obj, box, transpose=False) with monkeypatch.context() as m: m.setattr(expr, "_MIN_ELEMENTS", 50) result = obj == "a" expected = Series(np.zeros(51, dtype=bool)) expected = tm.box_expected(expected, xbox, transpose=False) tm.assert_equal(result, expected) with monkeypatch.context() as m: m.setattr(expr, "_MIN_ELEMENTS", 50) result = obj != "a" tm.assert_equal(result, ~expected) msg = "Invalid comparison between dtype=float64 and str" with pytest.raises(TypeError, match=msg): obj < "a" # ------------------------------------------------------------------ # Numeric dtypes Arithmetic with Datetime/Timedelta Scalar
TestNumericComparisons
python
numpy__numpy
numpy/lib/tests/test_type_check.py
{ "start": 5369, "end": 6810 }
class ____: def test_basic(self): z = np.array([-1, 0, 1]) assert_(not iscomplexobj(z)) z = np.array([-1j, 0, -1]) assert_(iscomplexobj(z)) def test_scalar(self): assert_(not iscomplexobj(1.0)) assert_(iscomplexobj(1 + 0j)) def test_list(self): assert_(iscomplexobj([3, 1 + 0j, True])) assert_(not iscomplexobj([3, 1, True])) def test_duck(self): class DummyComplexArray: @property def dtype(self): return np.dtype(complex) dummy = DummyComplexArray() assert_(iscomplexobj(dummy)) def test_pandas_duck(self): # This tests a custom np.dtype duck-typed class, such as used by pandas # (pandas.core.dtypes) class PdComplex(np.complex128): pass class PdDtype: name = 'category' names = None type = PdComplex kind = 'c' str = '<c16' base = np.dtype('complex128') class DummyPd: @property def dtype(self): return PdDtype dummy = DummyPd() assert_(iscomplexobj(dummy)) def test_custom_dtype_duck(self): class MyArray(list): @property def dtype(self): return complex a = MyArray([1 + 0j, 2 + 0j, 3 + 0j]) assert_(iscomplexobj(a))
TestIscomplexobj
python
walkccc__LeetCode
solutions/3395. Subsequences with a Unique Middle Mode I/3395-2.py
{ "start": 0, "end": 1237 }
class ____: def subsequencesWithMiddleMode(self, nums: list[int]) -> int: MOD = 1_000_000_007 ans = 0 p = collections.Counter() # prefix counter s = collections.Counter(nums) # suffix counter def nC2(n: int) -> int: return n * (n - 1) // 2 for i, a in enumerate(nums): s[a] -= 1 l = i r = len(nums) - i - 1 # Start with all possible subsequences with `a` as the middle number. ans += nC2(l) * nC2(r) # Minus the cases where the frequency of `a` is 1, so it's not a mode. ans -= nC2(l - p[a]) * nC2(r - s[a]) for b in p | s: if b == a: continue # Minus the cases where the middle number is not a "unique" mode. ans -= p[a] * p[b] * s[b] * (r - s[a] - s[b]) # [a b] a [b c] ans -= s[a] * s[b] * p[b] * (l - p[a] - p[b]) # [b c] a [a b] ans -= nC2(p[b]) * s[a] * (r - s[a] - s[b]) # [b b] a [a c] ans -= nC2(s[b]) * p[a] * (l - p[a] - p[b]) # [a c] a [b b] # Minus the cases where the middle number is not a mode. ans -= nC2(p[b]) * s[a] * s[b] # [b b] a [a b] ans -= nC2(s[b]) * p[a] * p[b] # [a b] a [b b] ans %= MOD p[a] += 1 return ans
Solution
python
Textualize__textual
src/textual/widgets/_welcome.py
{ "start": 744, "end": 1535 }
class ____(Static): """A Textual welcome widget. This widget can be used as a form of placeholder within a Textual application; although also see [Placeholder][textual.widgets._placeholder.Placeholder]. """ DEFAULT_CSS = """ Welcome { width: 100%; height: 100%; background: $surface; } Welcome Container { padding: 1; color: $foreground; } Welcome #text { margin: 0 1; } Welcome #close { dock: bottom; width: 100%; } """ def compose(self) -> ComposeResult: yield Container(Static(Markdown(WELCOME_MD), id="text"), id="md") yield Button("OK", id="close", variant="success")
Welcome
python
dagster-io__dagster
python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/schemas/run_event.py
{ "start": 1076, "end": 1305 }
class ____(BaseModel): """Paginated run events response.""" items: list[DgApiRunEvent] total: int cursor: Optional[str] = None has_more: bool = False class Config: from_attributes = True
RunEventList
python
spyder-ide__spyder
spyder/plugins/editor/widgets/window.py
{ "start": 1933, "end": 1986 }
class ____: Window = "window"
EditorMainWindowMenus
python
gevent__gevent
src/gevent/tests/test__hub.py
{ "start": 2839, "end": 3175 }
class ____(gevent.testing.timing.AbstractGenericWaitTestCase): def setUp(self): super(TestWaiterGet, self).setUp() self.waiter = Waiter() def wait(self, timeout): with get_hub().loop.timer(timeout) as evt: evt.start(self.waiter.switch, None) return self.waiter.get()
TestWaiterGet
python
kamyu104__LeetCode-Solutions
Python/remove-k-balanced-substrings.py
{ "start": 37, "end": 900 }
class ____(object): def removeSubstring(self, s, k): """ :type s: str :type k: int :rtype: str """ def count(x): if x == '(': if cnt[0] < k: cnt[0] += 1 elif cnt[0] > k: cnt[0] = 1 else: if cnt[0] >= k: cnt[0] += 1 else: cnt[0] = 0 result = [] cnt = [0] for x in s: result.append(x) count(x) if cnt[0] != 2*k: continue for _ in xrange(2*k): result.pop() cnt[0] = 0 for i in xrange(max(len(result)-(2*k-1), 0), len(result)): count(result[i]) return "".join(result)
Solution
python
altair-viz__altair
altair/vegalite/v6/schema/_config.py
{ "start": 208657, "end": 213191 }
class ____(TypedDict, total=False): """ :class:`altair.PointSelectionConfigWithoutType` ``TypedDict`` wrapper. Parameters ---------- clear Clears the selection, emptying it of all values. This property can be a `Event Stream <https://vega.github.io/vega/docs/event-streams/>`__ or ``false`` to disable clear. **Default value:** ``dblclick``. **See also:** `clear examples <https://vega.github.io/vega-lite/docs/selection.html#clear>`__ in the documentation. encodings An array of encoding channels. The corresponding data field values must match for a data tuple to fall within the selection. **See also:** The `projection with encodings and fields section <https://vega.github.io/vega-lite/docs/selection.html#project>`__ in the documentation. fields An array of field names whose values must match for a data tuple to fall within the selection. **See also:** The `projection with encodings and fields section <https://vega.github.io/vega-lite/docs/selection.html#project>`__ in the documentation. nearest When true, an invisible voronoi diagram is computed to accelerate discrete selection. The data value *nearest* the mouse cursor is added to the selection. **Default value:** ``false``, which means that data values must be interacted with directly (e.g., clicked on) to be added to the selection. **See also:** `nearest examples <https://vega.github.io/vega-lite/docs/selection.html#nearest>`__ documentation. on A `Vega event stream <https://vega.github.io/vega/docs/event-streams/>`__ (object or selector) that triggers the selection. For interval selections, the event stream must specify a `start and end <https://vega.github.io/vega/docs/event-streams/#between-filters>`__. **See also:** `on examples <https://vega.github.io/vega-lite/docs/selection.html#on>`__ in the documentation. resolve With layered and multi-view displays, a strategy that determines how selections' data queries are resolved when applied in a filter transform, conditional encoding rule, or scale domain. 
One of: * ``"global"`` -- only one brush exists for the entire SPLOM. When the user begins to drag, any previous brushes are cleared, and a new one is constructed. * ``"union"`` -- each cell contains its own brush, and points are highlighted if they lie within *any* of these individual brushes. * ``"intersect"`` -- each cell contains its own brush, and points are highlighted only if they fall within *all* of these individual brushes. **Default value:** ``global``. **See also:** `resolve examples <https://vega.github.io/vega-lite/docs/selection.html#resolve>`__ in the documentation. toggle Controls whether data values should be toggled (inserted or removed from a point selection) or only ever inserted into point selections. One of: * ``true`` -- the default behavior, which corresponds to ``"event.shiftKey"``. As a result, data values are toggled when the user interacts with the shift-key pressed. * ``false`` -- disables toggling behaviour; the selection will only ever contain a single data value corresponding to the most recent interaction. * A `Vega expression <https://vega.github.io/vega/docs/expressions/>`__ which is re-evaluated as the user interacts. If the expression evaluates to ``true``, the data value is toggled into or out of the point selection. If the expression evaluates to ``false``, the point selection is first cleared, and the data value is then inserted. For example, setting the value to the Vega expression ``"true"`` will toggle data values without the user pressing the shift-key. **Default value:** ``true`` **See also:** `toggle examples <https://vega.github.io/vega-lite/docs/selection.html#toggle>`__ in the documentation. """ clear: str | bool | MergedStreamKwds | DerivedStreamKwds encodings: Sequence[SingleDefUnitChannel_T] fields: Sequence[str] nearest: bool on: str | MergedStreamKwds | DerivedStreamKwds resolve: SelectionResolution_T toggle: str | bool
PointSelectionConfigWithoutTypeKwds
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/exc.py
{ "start": 9793, "end": 9972 }
class ____(InvalidRequestError): """An object that tracks state encountered an illegal state change of some kind. .. versionadded:: 2.0 """
IllegalStateChangeError
python
keras-team__keras
keras/src/ops/numpy.py
{ "start": 84443, "end": 85621 }
class ____(Operation): def call(self, x, bins): return backend.numpy.digitize(x, bins) def compute_output_spec(self, x, bins): bins_shape = bins.shape if len(bins_shape) > 1: raise ValueError( f"`bins` must be a 1D array. Received: bins={bins} " f"with shape bins.shape={bins_shape}" ) sparse = getattr(x, "sparse", False) return KerasTensor(x.shape, dtype="int32", sparse=sparse) @keras_export(["keras.ops.digitize", "keras.ops.numpy.digitize"]) def digitize(x, bins): """Returns the indices of the bins to which each value in `x` belongs. Args: x: Input array to be binned. bins: Array of bins. It has to be one-dimensional and monotonically increasing. Returns: Output array of indices, of same shape as `x`. Example: >>> x = np.array([0.0, 1.0, 3.0, 1.6]) >>> bins = np.array([0.0, 3.0, 4.5, 7.0]) >>> keras.ops.digitize(x, bins) array([1, 1, 2, 1]) """ if any_symbolic_tensors((x, bins)): return Digitize().symbolic_call(x, bins) return backend.numpy.digitize(x, bins)
Digitize
python
marshmallow-code__apispec
tests/test_ext_marshmallow.py
{ "start": 779, "end": 7703 }
class ____: @pytest.mark.parametrize("schema", [PetSchema, PetSchema()]) def test_can_use_schema_as_definition(self, spec, schema): spec.components.schema("Pet", schema=schema) definitions = get_schemas(spec) props = definitions["Pet"]["properties"] assert props["id"]["type"] == "integer" assert props["name"]["type"] == "string" def test_schema_helper_without_schema(self, spec): spec.components.schema("Pet", {"properties": {"key": {"type": "integer"}}}) definitions = get_schemas(spec) assert definitions["Pet"]["properties"] == {"key": {"type": "integer"}} @pytest.mark.parametrize("schema", [AnalysisSchema, AnalysisSchema()]) def test_resolve_schema_dict_auto_reference(self, schema): def resolver(schema): schema_cls = common.resolve_schema_cls(schema) return schema_cls.__name__ spec = APISpec( title="Test auto-reference", version="0.1", openapi_version="2.0", plugins=(MarshmallowPlugin(schema_name_resolver=resolver),), ) with pytest.raises(KeyError): get_schemas(spec) spec.components.schema("analysis", schema=schema) spec.path( "/test", operations={ "get": { "responses": { "200": {"schema": build_ref(spec, "schema", "analysis")} } } }, ) definitions = get_schemas(spec) assert 3 == len(definitions) assert "analysis" in definitions assert "SampleSchema" in definitions assert "RunSchema" in definitions @pytest.mark.parametrize( "schema", [AnalysisWithListSchema, AnalysisWithListSchema()] ) def test_resolve_schema_dict_auto_reference_in_list(self, schema): def resolver(schema): schema_cls = common.resolve_schema_cls(schema) return schema_cls.__name__ spec = APISpec( title="Test auto-reference", version="0.1", openapi_version="2.0", plugins=(MarshmallowPlugin(schema_name_resolver=resolver),), ) with pytest.raises(KeyError): get_schemas(spec) spec.components.schema("analysis", schema=schema) spec.path( "/test", operations={ "get": { "responses": { "200": {"schema": build_ref(spec, "schema", "analysis")} } } }, ) definitions = get_schemas(spec) assert 3 == len(definitions) 
assert "analysis" in definitions assert "SampleSchema" in definitions assert "RunSchema" in definitions @pytest.mark.parametrize("schema", [AnalysisSchema, AnalysisSchema()]) def test_resolve_schema_dict_auto_reference_return_none(self, schema): def resolver(schema): return None spec = APISpec( title="Test auto-reference", version="0.1", openapi_version="2.0", plugins=(MarshmallowPlugin(schema_name_resolver=resolver),), ) with pytest.raises(KeyError): get_schemas(spec) with pytest.raises( APISpecError, match="Name resolver returned None for schema" ): spec.components.schema("analysis", schema=schema) @pytest.mark.parametrize("schema", [AnalysisSchema, AnalysisSchema()]) def test_warning_when_schema_added_twice(self, spec, schema): spec.components.schema("Analysis", schema=schema) with pytest.warns(UserWarning, match="has already been added to the spec"): spec.components.schema("DuplicateAnalysis", schema=schema) def test_schema_instances_with_different_modifiers_added(self, spec): class MultiModifierSchema(Schema): pet_unmodified = Nested(PetSchema) pet_exclude = Nested(PetSchema, exclude=("name",)) spec.components.schema("Pet", schema=PetSchema()) spec.components.schema("Pet_Exclude", schema=PetSchema(exclude=("name",))) spec.components.schema("MultiModifierSchema", schema=MultiModifierSchema) definitions = get_schemas(spec) pet_unmodified_ref = definitions["MultiModifierSchema"]["properties"][ "pet_unmodified" ] assert pet_unmodified_ref == build_ref(spec, "schema", "Pet") pet_exclude = definitions["MultiModifierSchema"]["properties"]["pet_exclude"] assert pet_exclude == build_ref(spec, "schema", "Pet_Exclude") def test_schema_instance_with_different_modifers_custom_resolver(self, recwarn): class MultiModifierSchema(Schema): pet_unmodified = Nested(PetSchema) pet_exclude = Nested(PetSchema(partial=True)) def resolver(schema): schema_instance = common.resolve_schema_instance(schema) prefix = "Partial-" if schema_instance.partial else "" schema_cls = 
common.resolve_schema_cls(schema) name = prefix + schema_cls.__name__ if name.endswith("Schema"): return name[:-6] or name return name spec = APISpec( title="Test Custom Resolver for Partial", version="0.1", openapi_version="2.0", plugins=(MarshmallowPlugin(schema_name_resolver=resolver),), ) spec.components.schema("NameClashSchema", schema=MultiModifierSchema) assert not recwarn def test_schema_with_clashing_names(self, spec): class Pet(PetSchema): another_field = String() class NameClashSchema(Schema): pet_1 = Nested(PetSchema) pet_2 = Nested(Pet) with pytest.warns( UserWarning, match="Multiple schemas resolved to the name Pet" ): spec.components.schema("NameClashSchema", schema=NameClashSchema) definitions = get_schemas(spec) assert "Pet" in definitions assert "Pet1" in definitions def test_resolve_nested_schema_many_true_resolver_return_none(self): def resolver(schema): return None class PetFamilySchema(Schema): pets_1 = Nested(PetSchema, many=True) pets_2 = List(Nested(PetSchema)) spec = APISpec( title="Test auto-reference", version="0.1", openapi_version="2.0", plugins=(MarshmallowPlugin(schema_name_resolver=resolver),), ) spec.components.schema("PetFamily", schema=PetFamilySchema) props = get_schemas(spec)["PetFamily"]["properties"] pets_1 = props["pets_1"] pets_2 = props["pets_2"] assert pets_1["type"] == pets_2["type"] == "array"
TestDefinitionHelper
python
ansible__ansible
test/lib/ansible_test/_util/controller/sanity/pylint/plugins/deprecated_calls.py
{ "start": 1536, "end": 22241 }
class ____(pylint.checkers.BaseChecker): """Checks for deprecated calls to ensure proper usage.""" name = 'deprecated-calls' msgs = { 'E9501': ( "Deprecated version %r found in call to %r", "ansible-deprecated-version", None, ), 'E9502': ( "Found %r call without a version or date", "ansible-deprecated-no-version", None, ), 'E9503': ( "Invalid deprecated version %r found in call to %r", "ansible-invalid-deprecated-version", None, ), 'E9504': ( "Deprecated version %r found in call to %r", "collection-deprecated-version", None, ), 'E9505': ( "Invalid deprecated version %r found in call to %r", "collection-invalid-deprecated-version", None, ), 'E9506': ( "No collection_name or deprecator found in call to %r", "ansible-deprecated-no-collection-name", None, ), 'E9507': ( "Wrong collection_name %r found in call to %r", "wrong-collection-deprecated", None, ), 'E9508': ( "Expired date %r found in call to %r", "ansible-expired-deprecated-date", None, ), 'E9509': ( "Invalid date %r found in call to %r", "ansible-invalid-deprecated-date", None, ), 'E9510': ( "Both version and date found in call to %r", "ansible-deprecated-both-version-and-date", None, ), 'E9511': ( "Removal version %r must be a major release, not a minor or patch release, see https://semver.org/", "removal-version-must-be-major", None, ), 'E9512': ( "Passing date is not permitted in call to %r for ansible-core, use a version instead", "ansible-deprecated-date-not-permitted", None, ), 'E9513': ( "Unnecessary %r found in call to %r", "ansible-deprecated-unnecessary-collection-name", None, ), 'E9514': ( "Passing collection_name not permitted in call to %r for ansible-core, use deprecator instead", "ansible-deprecated-collection-name-not-permitted", None, ), 'E9515': ( "Both collection_name and deprecator found in call to %r", "ansible-deprecated-both-collection-name-and-deprecator", None, ), } options = ( ( 'collection-name', dict( default=None, type='string', metavar='<name>', help="The name of the collection to 
check.", ), ), ( 'collection-version', dict( default=None, type='string', metavar='<version>', help="The version of the collection to check.", ), ), ( 'collection-path', dict( default=None, type='string', metavar='<path>', help="The path of the collection to check.", ), ), ) ANSIBLE_VERSION = StrictVersion(re.match('[0-9.]*[0-9]', ansible.release.__version__)[0]) """The current ansible-core X.Y.Z version.""" DEPRECATION_MODULE_FUNCTIONS: dict[tuple[str, str], tuple[str, ...]] = { ('ansible.module_utils.common.warnings', 'deprecate'): ('msg', 'version', 'date', 'collection_name'), ('ansible.module_utils.datatag', 'deprecate_value'): ('value', 'msg'), ('ansible.module_utils.basic', 'AnsibleModule.deprecate'): ('msg', 'version', 'date', 'collection_name'), ('ansible.utils.display', 'Display.deprecated'): ('msg', 'version', 'removed', 'date', 'collection_name'), } """Mapping of deprecation module+function and their positional arguments.""" DEPRECATION_MODULES = frozenset(key[0] for key in DEPRECATION_MODULE_FUNCTIONS) """Modules which contain deprecation functions.""" DEPRECATION_FUNCTIONS = {'.'.join(key): value for key, value in DEPRECATION_MODULE_FUNCTIONS.items()} """Mapping of deprecation functions and their positional arguments.""" def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self.module_cache: dict[str, astroid.nodes.Module] = {} @functools.cached_property def collection_name(self) -> str | None: """Return the collection name, or None if ansible-core is being tested.""" return self.linter.config.collection_name or None @functools.cached_property def collection_path(self) -> pathlib.Path: """Return the collection path. 
Not valid when ansible-core is being tested.""" return pathlib.Path(self.linter.config.collection_path) @functools.cached_property def collection_version(self) -> SemanticVersion | None: """Return the collection version, or None if ansible-core is being tested.""" if not self.linter.config.collection_version: return None sem_ver = SemanticVersion(self.linter.config.collection_version) sem_ver.prerelease = () # ignore pre-release for version comparison to catch issues before the final release is cut return sem_ver @functools.cached_property def is_ansible_core(self) -> bool: """True if ansible-core is being tested.""" return not self.collection_name @functools.cached_property def today_utc(self) -> datetime.date: """Today's date in UTC.""" return datetime.datetime.now(tz=datetime.timezone.utc).date() def is_deprecator_required(self) -> bool | None: """Determine is a `collection_name` or `deprecator` is required (True), unnecessary (False) or optional (None).""" if self.is_ansible_core: return False # in ansible-core, never provide the deprecator -- if it really is needed, disable the sanity test inline for that line of code plugin_info = _path_as_collection_plugininfo(self.linter.current_file) if plugin_info is INDETERMINATE_DEPRECATOR: return True # deprecator cannot be detected, caller must provide deprecator # deprecated: description='deprecate collection_name/deprecator now that detection is widely available' core_version='2.23' # When this deprecation triggers, change the return type here to False. 
# At that point, callers should be able to omit the collection_name/deprecator in all but a few cases (inline ignores can be used for those cases) return None @pylint.checkers.utils.only_required_for_messages(*(msgs.keys())) def visit_call(self, node: astroid.nodes.Call) -> None: """Visit a call node.""" if inferred := self.infer(node.func): name = self.get_fully_qualified_name(inferred) if args := self.DEPRECATION_FUNCTIONS.get(name): self.check_call(node, name, args) def infer(self, node: astroid.nodes.NodeNG) -> astroid.nodes.NodeNG | None: """Return the inferred node from the given node, or `None` if it cannot be unambiguously inferred.""" names: list[str] = [] target: astroid.nodes.NodeNG | None = node inferred: astroid.typing.InferenceResult | None = None while target: if inferred := astroid.util.safe_infer(target): break if isinstance(target, astroid.nodes.Call): inferred = self.infer(target.func) break if isinstance(target, astroid.nodes.FunctionDef): inferred = target break if isinstance(target, astroid.nodes.Name): target = self.infer_name(target) elif isinstance(target, astroid.nodes.AssignName) and isinstance(target.parent, astroid.nodes.Assign): target = target.parent.value elif isinstance(target, astroid.nodes.Attribute): names.append(target.attrname) target = target.expr else: break for name in reversed(names): if isinstance(inferred, astroid.bases.Instance): try: attr = next(iter(inferred.getattr(name)), None) except astroid.exceptions.AttributeInferenceError: break if isinstance(attr, astroid.nodes.AssignAttr): inferred = self.get_ansible_module(attr) continue if isinstance(attr, astroid.nodes.FunctionDef): inferred = attr continue if not isinstance(inferred, (astroid.nodes.Module, astroid.nodes.ClassDef)): inferred = None break try: inferred = inferred[name] except KeyError: inferred = None else: inferred = self.infer(inferred) if isinstance(inferred, astroid.nodes.FunctionDef) and isinstance(inferred.parent, astroid.nodes.ClassDef): inferred = 
astroid.bases.BoundMethod(inferred, inferred.parent) return inferred def infer_name(self, node: astroid.nodes.Name) -> astroid.nodes.NodeNG | None: """Infer the node referenced by the given name, or `None` if it cannot be unambiguously inferred.""" scope = node.scope() inferred: astroid.nodes.NodeNG | None = None name = node.name while scope: try: assignment = scope[name] except KeyError: scope = scope.parent.scope() if scope.parent else None continue if isinstance(assignment, astroid.nodes.AssignName) and isinstance(assignment.parent, astroid.nodes.Assign): inferred = assignment.parent.value elif ( isinstance(scope, astroid.nodes.FunctionDef) and isinstance(assignment, astroid.nodes.AssignName) and isinstance(assignment.parent, astroid.nodes.Arguments) and assignment.parent.annotations ): idx, _node = assignment.parent.find_argname(name) if idx is not None: try: annotation = assignment.parent.annotations[idx] except IndexError: pass else: if isinstance(annotation, astroid.nodes.Name): name = annotation.name continue elif isinstance(assignment, astroid.nodes.ClassDef): inferred = assignment elif isinstance(assignment, astroid.nodes.ImportFrom): if module := self.get_module(assignment): name = assignment.real_name(name) scope = module.scope() continue break return inferred def get_module(self, node: astroid.nodes.ImportFrom) -> astroid.nodes.Module | None: """Import the requested module if possible and cache the result.""" module_name = pylint.checkers.utils.get_import_name(node, node.modname) if module_name not in self.DEPRECATION_MODULES: return None # avoid unnecessary import overhead if module := self.module_cache.get(module_name): return module module = node.do_import_module() if module.name != module_name: raise RuntimeError(f'Attempted to import {module_name!r} but found {module.name!r} instead.') self.module_cache[module_name] = module return module @staticmethod def get_fully_qualified_name(node: astroid.nodes.NodeNG) -> str | None: """Return the fully 
qualified name of the given inferred node.""" parent = node.parent parts: tuple[str, ...] | None if isinstance(node, astroid.nodes.FunctionDef) and isinstance(parent, astroid.nodes.Module): parts = (parent.name, node.name) elif isinstance(node, astroid.bases.BoundMethod) and isinstance(parent, astroid.nodes.ClassDef) and isinstance(parent.parent, astroid.nodes.Module): parts = (parent.parent.name, parent.name, node.name) else: parts = None return '.'.join(parts) if parts else None def check_call(self, node: astroid.nodes.Call, name: str, args: tuple[str, ...]) -> None: """Check the given deprecation call node for valid arguments.""" call_args = self.get_deprecation_call_args(node, args) self.check_collection_name(node, name, call_args) if not call_args.version and not call_args.date: self.add_message('ansible-deprecated-no-version', node=node, args=(name,)) return if call_args.date and self.is_ansible_core: self.add_message('ansible-deprecated-date-not-permitted', node=node, args=(name,)) return if call_args.all_args_dynamic(): # assume collection maintainers know what they're doing if all args are dynamic return if call_args.version and call_args.date: self.add_message('ansible-deprecated-both-version-and-date', node=node, args=(name,)) return if call_args.date: self.check_date(node, name, call_args) if call_args.version: self.check_version(node, name, call_args) @staticmethod def get_deprecation_call_args(node: astroid.nodes.Call, args: tuple[str, ...]) -> DeprecationCallArgs: """Get the deprecation call arguments from the given node.""" fields: dict[str, object] = {} for idx, arg in enumerate(node.args): field = args[idx] fields[field] = arg for keyword in node.keywords: if keyword.arg is not None: fields[keyword.arg] = keyword.value for key, value in fields.items(): if isinstance(value, astroid.nodes.Const): fields[key] = value.value return DeprecationCallArgs(**fields) def check_collection_name(self, node: astroid.nodes.Call, name: str, args: 
DeprecationCallArgs) -> None: """Check the collection name provided to the given call node.""" deprecator_requirement = self.is_deprecator_required() if self.is_ansible_core and args.collection_name: self.add_message('ansible-deprecated-collection-name-not-permitted', node=node, args=(name,)) return if args.collection_name and args.deprecator: self.add_message('ansible-deprecated-both-collection-name-and-deprecator', node=node, args=(name,)) if deprecator_requirement is True: if not args.collection_name and not args.deprecator: self.add_message('ansible-deprecated-no-collection-name', node=node, args=(name,)) return elif deprecator_requirement is False: if args.collection_name: self.add_message('ansible-deprecated-unnecessary-collection-name', node=node, args=('collection_name', name,)) return if args.deprecator: self.add_message('ansible-deprecated-unnecessary-collection-name', node=node, args=('deprecator', name,)) return else: # collection_name may be needed for backward compat with 2.18 and earlier, since it is only detected in 2.19 and later if args.deprecator: # Unlike collection_name, which is needed for backward compat, deprecator is generally not needed by collections. # For the very rare cases where this is needed by collections, an inline pylint ignore can be used to silence it. 
self.add_message('ansible-deprecated-unnecessary-collection-name', node=node, args=('deprecator', name,)) return if args.all_args_dynamic(): # assume collection maintainers know what they're doing if all args are dynamic return expected_collection_name = 'ansible.builtin' if self.is_ansible_core else self.collection_name if args.collection_name and args.collection_name != expected_collection_name: self.add_message('wrong-collection-deprecated', node=node, args=(args.collection_name, name)) def check_version(self, node: astroid.nodes.Call, name: str, args: DeprecationCallArgs) -> None: """Check the version provided to the given call node.""" if self.collection_name: self.check_collection_version(node, name, args) else: self.check_core_version(node, name, args) def check_core_version(self, node: astroid.nodes.Call, name: str, args: DeprecationCallArgs) -> None: """Check the core version provided to the given call node.""" try: if not isinstance(args.version, str) or not args.version: raise ValueError() strict_version = StrictVersion(args.version) except ValueError: self.add_message('ansible-invalid-deprecated-version', node=node, args=(args.version, name)) return if self.ANSIBLE_VERSION >= strict_version: self.add_message('ansible-deprecated-version', node=node, args=(args.version, name)) def check_collection_version(self, node: astroid.nodes.Call, name: str, args: DeprecationCallArgs) -> None: """Check the collection version provided to the given call node.""" try: if not isinstance(args.version, str) or not args.version: raise ValueError() semantic_version = SemanticVersion(args.version) except ValueError: self.add_message('collection-invalid-deprecated-version', node=node, args=(args.version, name)) return if self.collection_version >= semantic_version: self.add_message('collection-deprecated-version', node=node, args=(args.version, name)) if semantic_version.major != 0 and (semantic_version.minor != 0 or semantic_version.patch != 0): 
self.add_message('removal-version-must-be-major', node=node, args=(args.version,)) def check_date(self, node: astroid.nodes.Call, name: str, args: DeprecationCallArgs) -> None: """Check the date provided to the given call node.""" try: date_parsed = self.parse_isodate(args.date) except (ValueError, TypeError): self.add_message('ansible-invalid-deprecated-date', node=node, args=(args.date, name)) else: if date_parsed < self.today_utc: self.add_message('ansible-expired-deprecated-date', node=node, args=(args.date, name)) @staticmethod def parse_isodate(value: object) -> datetime.date: """Parse an ISO 8601 date string.""" if isinstance(value, str): return datetime.date.fromisoformat(value) raise TypeError(type(value)) def get_ansible_module(self, node: astroid.nodes.AssignAttr) -> astroid.bases.Instance | None: """Infer an AnsibleModule instance node from the given assignment.""" if isinstance(node.parent, astroid.nodes.Assign) and isinstance(node.parent.type_annotation, astroid.nodes.Name): inferred = self.infer_name(node.parent.type_annotation) elif (isinstance(node.parent, astroid.nodes.Assign) and isinstance(node.parent.parent, astroid.nodes.FunctionDef) and isinstance(node.parent.value, astroid.nodes.Name)): inferred = self.infer_name(node.parent.value) elif isinstance(node.parent, astroid.nodes.AnnAssign) and isinstance(node.parent.annotation, astroid.nodes.Name): inferred = self.infer_name(node.parent.annotation) else: inferred = None if isinstance(inferred, astroid.nodes.ClassDef) and inferred.name == 'AnsibleModule': return inferred.instantiate_class() return None def register(self) -> None: """Register this plugin.""" self.linter.register_checker(self) def register(linter: pylint.lint.PyLinter) -> None: """Required method to auto-register this checker.""" AnsibleDeprecatedChecker(linter).register()
AnsibleDeprecatedChecker
python
PyCQA__pylint
tests/functional/m/member/member_checks.py
{ "start": 2062, "end": 2222 }
class ____: """no-member shouldn't be emitted for classes with dunder getattr.""" def __getattr__(self, attr): return self.__dict__[attr]
Getattr
python
pandas-dev__pandas
asv_bench/benchmarks/io/csv.py
{ "start": 279, "end": 1293 }
class ____(BaseIO): fname = "__test__.csv" params = ["wide", "long", "mixed"] param_names = ["kind"] def setup(self, kind): wide_frame = DataFrame(np.random.randn(3000, 30)) long_frame = DataFrame( { "A": np.arange(50000), "B": np.arange(50000) + 1.0, "C": np.arange(50000) + 2.0, "D": np.arange(50000) + 3.0, } ) mixed_frame = DataFrame( { "float": np.random.randn(5000), "int": np.random.randn(5000).astype(int), "bool": (np.arange(5000) % 2) == 0, "datetime": date_range("2001", freq="s", periods=5000), "object": ["foo"] * 5000, } ) mixed_frame.loc[30:500, "float"] = np.nan data = {"wide": wide_frame, "long": long_frame, "mixed": mixed_frame} self.df = data[kind] def time_frame(self, kind): self.df.to_csv(self.fname)
ToCSV
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_hyperlink25.py
{ "start": 315, "end": 945 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("hyperlink25.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with hyperlinks.""" workbook = Workbook(self.got_filename) # Turn off default URL format for testing. workbook.default_url_format = None worksheet = workbook.add_worksheet() worksheet.write_url("A1", "http://www.google.com/#foo#bar") workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
sympy__sympy
sympy/vector/basisdependent.py
{ "start": 10160, "end": 11871 }
class ____(BasisDependent): """ Class to denote a zero basis dependent instance. """ components: dict['BaseVector', Expr] = {} _latex_form: str def __new__(cls): obj = super().__new__(cls) # Pre-compute a specific hash value for the zero vector # Use the same one always obj._hash = (S.Zero, cls).__hash__() return obj def __hash__(self): return self._hash @call_highest_priority('__req__') def __eq__(self, other): return isinstance(other, self._zero_func) __req__ = __eq__ @call_highest_priority('__radd__') def __add__(self, other): if isinstance(other, self._expr_type): return other else: raise TypeError("Invalid argument types for addition") @call_highest_priority('__add__') def __radd__(self, other): if isinstance(other, self._expr_type): return other else: raise TypeError("Invalid argument types for addition") @call_highest_priority('__rsub__') def __sub__(self, other): if isinstance(other, self._expr_type): return -other else: raise TypeError("Invalid argument types for subtraction") @call_highest_priority('__sub__') def __rsub__(self, other): if isinstance(other, self._expr_type): return other else: raise TypeError("Invalid argument types for subtraction") def __neg__(self): return self def normalize(self): """ Returns the normalized version of this vector. """ return self def _sympystr(self, printer): return '0'
BasisDependentZero
python
joke2k__faker
faker/providers/internet/en_NZ/__init__.py
{ "start": 46, "end": 425 }
class ____(InternetProvider): free_email_domains = ( "gmail.com", "yahoo.com", "hotmail.com", "inspire.net.nz", "xtra.co.nz", ) tlds = ( "nz", "co.nz", "org.nz", "kiwi", "kiwi.nz", "geek.nz", "net.nz", "school.nz", "ac.nz", "maori.nz", )
Provider
python
ray-project__ray
python/ray/experimental/channel/cpu_communicator.py
{ "start": 299, "end": 3351 }
class ____: """ Barrier actor that blocks the given number of actors until all actors have reached the Barrier. p2p operations are not done here (completed via shared memory channel). """ def __init__(self, num_actors: int): self.num_actors = num_actors self.condition = asyncio.Condition() # Stores the data for each collective operation self.collective_data: Dict[int, List["torch.Tensor"]] = defaultdict(list) # Stores the shape of data for each collective operation self.collective_data_shape: Dict[int, "torch.Tensor.type"] = {} # Buffer for the number of actors seen self.num_actors_seen = defaultdict(int) # Number of actors who have read the result, and are about to exit the function. # State is kept so we only garbage collect after the last actor has read the # relevant data. self.num_actors_read = defaultdict(int) async def wait_collective(self, op_id: int, data: "torch.Tensor", op: ReduceOp): """ Wait at the communicator until all actors have sent `op_id` and `data`. Once data from all actors is received, execute the collective `op` on the communicator actor and return the result. 
""" async with self.condition: self.collective_data[op_id].append(data) self.num_actors_seen[op_id] += 1 if self.num_actors_seen[op_id] == self.num_actors: # Apply the collective operation across all gathered tensors data = self._apply_op(op, self.collective_data[op_id]) self.collective_data[op_id] = data self.condition.notify_all() else: await self.condition.wait_for( lambda: self.num_actors_seen[op_id] == self.num_actors ) data = self.collective_data[op_id] self.num_actors_read[op_id] += 1 if self.num_actors_read[op_id] == self.num_actors: del self.collective_data[op_id] del self.num_actors_seen[op_id] del self.num_actors_read[op_id] return data def _apply_op(self, op: ReduceOp, tensors: List["torch.Tensor"]) -> "torch.Tensor": """Apply the specified reduction operation across a list of tensors.""" result = tensors[0].clone() if op == ReduceOp.SUM: for tensor in tensors[1:]: result += tensor elif op == ReduceOp.PRODUCT: for tensor in tensors[1:]: result *= tensor elif op == ReduceOp.MAX: for tensor in tensors[1:]: result = torch.max(result, tensor) elif op == ReduceOp.MIN: for tensor in tensors[1:]: result = torch.min(result, tensor) elif op == ReduceOp.AVG: result = sum(tensors) / len(tensors) else: raise ValueError(f"Operation {op} not supported") return result
CPUCommBarrier
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/libtool_deletion/package.py
{ "start": 637, "end": 1237 }
class ____(autotools.AutotoolsBuilder): install_libtool_archives = False def autoreconf(self, pkg, spec, prefix): mkdirp(os.path.dirname(self.configure_abs_path)) touch(self.configure_abs_path) def configure(self, pkg, spec, prefix): pass def build(self, pkg, spec, prefix): pass def install(self, pkg, spec, prefix): mkdirp(os.path.dirname(self.libtool_archive_file)) touch(self.libtool_archive_file) @property def libtool_archive_file(self): return os.path.join(str(self.prefix.lib), "libfoo.la")
AutotoolsBuilder
python
bokeh__bokeh
src/bokeh/events.py
{ "start": 8097, "end": 8922 }
class ____(ModelEvent): ''' Announce a location where an axis was clicked. For continuous numerical axes, the value will be a number. For log axes, this number is the log decade. For categorical axes, the value will be a categorical factor, i.e. a string or a list of strings, representing the closest categorical factor that was clicked. ''' event_name = 'axis_click' value: float | FactorType | None def __init__(self, model: Axis | None, value: float | FactorType | None = None) -> None: from .models import Axis if model is not None and not isinstance(model, Axis): clsname = self.__class__.__name__ raise ValueError(f"{clsname} event only applies to axis models") super().__init__(model=model) self.value = value
AxisClick
python
getsentry__sentry
src/sentry/grouping/enhancer/matchers.py
{ "start": 13411, "end": 13504 }
class ____(ExceptionFieldMatch): field_path = ["mechanism", "type"]
ExceptionMechanismMatch
python
tensorflow__tensorflow
tensorflow/python/distribute/multi_process_runner.py
{ "start": 46501, "end": 60102 }
class ____(RuntimeError): """An error indicating `multi_process_runner.run` is used without init. When this is raised, user is supposed to call `tf.__internal__.distribute.multi_process_runner.test_main()` within `if __name__ == '__main__':` block to properly initialize `multi_process_runner.run`. """ pass def _check_initialization(): if not multi_process_lib.initialized(): raise NotInitializedError( '`multi_process_runner` is not initialized. ' 'Please call `tf.__internal__.distribute.multi_process_runner.' 'test_main()` within `if __name__ == \'__main__\':` block ' 'in your python module to properly initialize ' '`multi_process_runner`.') def _set_tf_config(task_type, task_id, cluster_spec, rpc_layer=None): """Set TF_CONFIG environment variable.""" tf_config_dict = { 'cluster': cluster_spec, 'task': { 'type': task_type, 'index': task_id, }, } if rpc_layer is not None: tf_config_dict['rpc_layer'] = rpc_layer os.environ['TF_CONFIG'] = json.dumps(tf_config_dict) @tf_export('__internal__.distribute.multi_process_runner.run', v1=[]) def run(fn, cluster_spec, rpc_layer=None, max_run_time=None, return_output=False, timeout=_DEFAULT_TIMEOUT_SEC, args=None, kwargs=None): """Run `fn` in multiple processes according to `cluster_spec`. Given a callable `fn`, `tf.__internal__.distribute.multi_process_runner.run` launches multiple processes, each of which runs `fn`. These processes are referred to as "subprocesses" or "child processes". Each of those subprocesses will have their `TF_CONFIG` environment variable set, according to `cluster_spec` and their task types. The stdout of the subprocesses are streamed to the main process' and thus available in logs (if `stream_output` is True), with [type-id] prefix. `tf.__internal__.distribute.multi_process_runner.run` will block until all subprocesses have successfully exited, and return a namedtuple object that represents the run result. 
This object has a `return_value` attribute, which is a list that contains subprocesses `fn`'s return values, for those subprocesses that successfully returned from `fn`. The order of `return_value` list is not meaningful. If an optional arg `return_output` (default to False) is set to True, the namedtuple object will have an additional attribute `stdout`, which is a list containing the stdout of the subprocesses. If any subprocess' `fn` ends up raising an error, that error will be reraised from `tf.__internal__.distribute.multi_process_runner.run`, and the aforementioned namedtuple object will be available through the exception's `mpr_result` attribute. This utility is used for simulating running TensorFlow programs across multiple task types, and each of the task type may contain more than one task (except for "chief" where more than one task is prohibited). Test coverage of multi-worker training is the main application of this utility, where code written for multi-worker training can be realistically covered in unit tests. Any test module that uses `tf.__internal__.distribute.multi_process_runner.run()` must call `tf.__internal__.distribute.multi_process_runner.test_main()` instead of regular `test.main()` inside `if __name__ == '__main__':` block for proper initialization. Args: fn: Function to be run on child processes. This will be run on processes for all task types. cluster_spec: Dict for cluster spec. The utility function `tf.__internal__.distribute.multi_process_runner.create_cluster_spec` can be conveniently used to create such dict. The following is an example of cluster with three workers and two ps's. {"worker": ["worker0.example.com:2222", "worker1.example.com:2222", "worker2.example.com:2222"], "ps": ["ps0.example.com:2222", "ps1.example.com:2222"]} rpc_layer: RPC layer to use. Default value is 'grpc'. max_run_time: `None` or integer. If not `None`, child processes are forced to exit at approximately this many seconds after this utility is called. 
We achieve this through `signal.alarm()` api. Note that this is best effort at Python level since Python signal handler does not get executed when it runs lower level C/C++ code. So it can be delayed for arbitrarily long time. If any of the child process is still running when `max_run_time` is up, they will be force-terminated and an `tf.__internal__.distribute.multi_process_runner .UnexpectedSubprocessExitError` may be raised. If `None`, child processes are not forced to exit. return_output: If True, the output/error from the subprocesses should be collected to be attached to the resulting namedtuple returned from this utility. The list of output can be retrieved via `stdout` attribute. Defaults to False. timeout: optional integer or `None`. If provided as an integer, and not all processes report status within roughly `timeout` seconds, a `tf.__internal__.distribute.multi_process_runner.SubprocessTimeoutError` exception will be raised. If `None`, `tf.__internal__.distribute.multi_process_runner.run` never times out. Defaults to the constant `_DEFAULT_TIMEOUT_SEC` defined in `multi_process_runner` module. args: Positional arguments to be sent to `fn` run on subprocesses. kwargs: Keyword arguments to be sent to `fn` run on subprocesses. Returns: A namedtuple object, which has two attributes, `return_value` and `stdout`. `return_value` always contains a list of returnvalues from the subprocesses, although the order is not meaningful. If `return_output` argument is True, `stdout` is available that contains a list of all messages from subprocesses' stdout and stderr, and the order is mostly chronological. Raises: RuntimeError: if `tf.__internal__.distribute.multi_process_runner.test_main()` is not called in test's `if __name__ == '__main__':` block. ValueError: if there are more than one chief in the `cluster_spec`. tf.__internal__.distribute.multi_process_runner.SubprocessTimeoutError: if not all processes report status approximately within `timeout` seconds. 
When this is raised, a namedtuple object can be retrieved by `tf.__internal__.distribute.multi_process_runner.SubprocessTimeoutError`'s `mpr_result` attribute, which has the same structure as above 'Returns' section describes. tf.__internal__.distribute.multi_process_runner .UnexpectedSubprocessExitError: If any of the subprocesses did not exit properly (for example, they exit on SIGTERM or SIGKILL signal). When this is raised, a namedtuple object can be retrieved by `tf.__internal__.distribute.multi_process_runner .UnexpectedSubprocessExitError`'s `mpr_result` attribute, which has the same structure as above 'Returns' section describes. If `max_run_time` is not `None`, it is expected that some subprocesses may be force-killed when `max_run_time` is up, and this is raised in those cases. Exception: if there is an Exception propagated from any subprocess. When this is raised, a namedtuple object can be retrieved by `tf.__internal__.distribute.multi_process_runner .UnexpectedSubprocessExitError` `mpr_result` attribute, which has the same structure as above 'Returns' section describes. Examples: ```python class SimpleMultiProcessTest(tf.test.TestCase): def test_simple_printing_and_return(self): def fn(): resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver() # This will print "[chief-0]: Task type: chief , task id: 0" # for chief, for example. 
logging.info('Task type: %s, task id: %d', resolver.task_type, resolver.task_id) return resolver.task_type result = tf.__internal__.distribute.multi_process_runner.run( fn=fn, cluster_spec=( tf.__internal__ .distribute.multi_process_runner.create_cluster_spec( has_chief=True, num_workers=2))) assert sorted(result.return_value) == ['chief', 'worker', 'worker'] def test_error_from_fn(self): def fn(): resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver() raise ValueError('Task type {}, task id {} is errors out'.format( resolver.task_type, resolver.task_id)) with self.assertRaisesRegex(ValueError, 'Task type worker, task id 0 is errors out'): cluster_spec = ( tf.__internal__.distribute.multi_process_runner.create_cluster_spec( num_workers=1)) tf.__internal__.distribute.multi_process_runner.run( fn=fn, cluster_spec=cluster_spec) if __name__ == '__main__': tf.__internal__.distribute.multi_process_runner.test_main() ``` """ runner = MultiProcessRunner( fn, cluster_spec, rpc_layer, max_run_time=max_run_time, return_output=return_output, args=args, kwargs=kwargs) runner.start() return runner.join(timeout) # This is set by MultiProcessRunner in worker processes. _barrier = None @tf_export('__internal__.distribute.multi_process_runner.get_barrier', v1=[]) def get_barrier(): """Returns a `multiprocessing.Barrier` for `multi_process_runner.run`. `tf.__internal__.distribute.multi_process_runner.get_barrier()` returns a `multiprocessing.Barrier` object which can be used within `fn` of `tf.__internal__.distribute.multi_process_runner` to wait with `barrier.wait()` call until all other tasks have also reached the `barrier.wait()` call, before they can proceed individually. Note that all tasks (subprocesses) have to reach `barrier.wait()` call to proceed. Currently it is not supported to block on only a subset of tasks in the cluster. 
Example: ```python def fn(): some_work_to_be_done_by_all_tasks() tf.__internal__.distribute.multi_process_runner.get_barrier().wait() # The barrier guarantees that at this point, all tasks have finished # `some_work_to_be_done_by_all_tasks()` some_other_work_to_be_done_by_all_tasks() result = tf.__internal__.distribute.multi_process_runner.run( fn=fn, cluster_spec=( tf.__internal__ .distribute.multi_process_runner.create_cluster_spec( num_workers=2))) ``` Returns: A `multiprocessing.Barrier` for `multi_process_runner.run`. """ if _barrier is None: raise ValueError( 'barrier is not defined. It is likely because you are calling ' 'get_barrier() in the main process. get_barrier() can only be called ' 'in the subprocesses.' ) return _barrier _manager = None _manager_lock = threading.Lock() def manager(): """Returns the multiprocessing manager object for concurrency tools. The manager object is useful as it controls a server process that holds the python objects that can be shared across processes. This can be used for parent-subprocess communication: ```python manager = multi_process_runner.manager() some_event_happening_in_subprocess = manager.Event() mpr = multi_process_runner.MultiProcessRunner(fn, cluster_spec, args=(some_event_happening_in_subprocess,)) mpr.start() some_event_happening_in_subprocess.wait() # Do something that only should after some event happens in subprocess. ``` Note that the user of multi_process_runner should not create additional `multiprocessing.Manager()` objects; doing so can result in segfault in some cases. This method should only be called after multi_process_runner.test_main() is called. """ _check_initialization() global _manager with _manager_lock: if _manager is None: _manager = multiprocessing.Manager() return _manager @tf_export('__internal__.distribute.multi_process_runner.test_main', v1=[]) def test_main(): """Main function to be called within `__main__` of a test file. 
Any test module that uses `tf.__internal__.distribute.multi_process_runner.run()` must call this instead of regular `test.main()` inside `if __name__ == '__main__':` block, or an error will be raised when `tf.__internal__.distribute.multi_process_runner.run()` is used. This method takes care of needed initialization for launching multiple subprocesses. Example: ```python class MyTestClass(tf.test.TestCase): def testSomething(self): # Testing code making use of # `tf.__internal__.distribute.multi_process_runner.run()`. if __name__ == '__main__': tf.__internal__.distribute.multi_process_runner.test_main() ``` """ # Inject tearDownModule() to shut down all pool runners. Active pool runners # will block the program from exiting. This is necessary for global pool # runners. We tried atexit in the past, and it doesn't work in some # deployment. old_tear_down_module = getattr(sys.modules['__main__'], 'tearDownModule', None) def tear_down_module(): _shutdown_all_pool_runners() if old_tear_down_module is not None: old_tear_down_module() setattr(sys.modules['__main__'], 'tearDownModule', tear_down_module) multi_process_lib.test_main()
NotInitializedError
python
jazzband__tablib
src/tablib/core.py
{ "start": 2263, "end": 25261 }
class ____: """The :class:`Dataset` object is the heart of Tablib. It provides all core functionality. Usually you create a :class:`Dataset` instance in your main module, and append rows as you collect data. :: data = tablib.Dataset() data.headers = ('name', 'age') for (name, age) in some_collector(): data.append((name, age)) Setting columns is similar. The column data length must equal the current height of the data and headers must be set. :: data = tablib.Dataset() data.headers = ('first_name', 'last_name') data.append(('John', 'Adams')) data.append(('George', 'Washington')) data.append_col((90, 67), header='age') You can also set rows and headers upon instantiation. This is useful if dealing with dozens or hundreds of :class:`Dataset` objects. :: headers = ('first_name', 'last_name') data = [('John', 'Adams'), ('George', 'Washington')] data = tablib.Dataset(*data, headers=headers) :param \\*args: (optional) list of rows to populate Dataset :param headers: (optional) list strings for Dataset header row :param title: (optional) string to use as title of the Dataset .. admonition:: Format Attributes Definition If you look at the code, the various output/import formats are not defined within the :class:`Dataset` object. To add support for a new format, see :ref:`Adding New Formats <newformats>`. 
""" def __init__(self, *args, **kwargs): self._data = [Row(arg) for arg in args] self.__headers = None # ('title', index) tuples self._separators = [] # (column, callback) tuples self._formatters = [] # {col_index: col_func} self._dynamic_columns = {} self.headers = kwargs.get('headers') self.title = kwargs.get('title') def __len__(self): return self.height def __getitem__(self, key): if isinstance(key, str): if key in self.headers: pos = self.headers.index(key) # get 'key' index from each data return [row[pos] for row in self._data] else: raise KeyError else: _results = self._data[key] if isinstance(_results, Row): return _results.tuple else: return [result.tuple for result in _results] def __setitem__(self, key, value): self._validate(value) self._data[key] = Row(value) def __delitem__(self, key): if isinstance(key, str): if key in self.headers: pos = self.headers.index(key) del self.headers[pos] if pos in self._dynamic_columns: del self._dynamic_columns[pos] for i, row in enumerate(self._data): del row[pos] self._data[i] = row else: raise KeyError else: del self._data[key] def __repr__(self): try: return f'<{self.title.lower()} dataset>' except AttributeError: return '<dataset object>' def __str__(self): result = [] # Add str representation of headers. if self.__headers: result.append([str(h) for h in self.__headers]) # Add str representation of rows. 
result.extend(list(map(str, row)) for row in self._data) lens = [list(map(len, row)) for row in result] field_lens = list(map(max, zip(*lens))) # delimiter between header and data if self.__headers: result.insert(1, ['-' * length for length in field_lens]) format_string = '|'.join('{{{}:{}}}'.format(*item) for item in enumerate(field_lens)) return '\n'.join(format_string.format(*row) for row in result) # --------- # Internals # --------- def _get_in_format(self, fmt_key, **kwargs): return registry.get_format(fmt_key).export_set(self, **kwargs) def _set_in_format(self, fmt_key, in_stream, **kwargs): in_stream = normalize_input(in_stream) return registry.get_format(fmt_key).import_set(self, in_stream, **kwargs) def _validate(self, row=None, col=None, safety=False): """Assures size of every row in dataset is of proper proportions.""" if row: if self.width: is_valid = ( len(row) == self.width or len(row) == (self.width - len(self._dynamic_columns)) ) else: is_valid = True elif col: if len(col) < 1: is_valid = True else: is_valid = (len(col) == self.height) if self.height else True else: is_valid = all(len(x) == self.width for x in self._data) if is_valid: return True else: if not safety: raise InvalidDimensions return False def _package(self, dicts=True): """Packages Dataset into lists of dictionaries for transmission.""" # TODO: Dicts default to false? 
_data = list(self._data) def format_row(row): # Execute formatters if self._formatters: row = row.copy() # To not mutate internal data structure for col, callback in self._formatters: if col is None: # Apply formatter to all cells row = [callback(cell) for cell in row] else: row[col] = callback(row[col]) return list(row) if self.headers: if dicts: data = [dict(list(zip(self.headers, format_row(row)))) for row in _data] else: data = [list(self.headers)] + [format_row(row) for row in _data] else: data = [format_row(row) for row in _data] return data def _get_headers(self): """An *optional* list of strings to be used for header rows and attribute names. This must be set manually. The given list length must equal :attr:`Dataset.width`. """ return self.__headers def _set_headers(self, collection): """Validating headers setter.""" self._validate(collection) if collection: self.__headers = list(collection) else: self.__headers = None headers = property(_get_headers, _set_headers) def _get_dict(self): """A native Python representation of the :class:`Dataset` object. If headers have been set, a list of Python dictionaries will be returned. If no headers have been set, a list of tuples (rows) will be returned instead. A dataset object can also be imported by setting the `Dataset.dict` attribute: :: data = tablib.Dataset() data.dict = [{'age': 90, 'first_name': 'Kenneth', 'last_name': 'Reitz'}] """ return self._package() def _set_dict(self, pickle): """A native Python representation of the Dataset object. If headers have been set, a list of Python dictionaries will be returned. If no headers have been set, a list of tuples (rows) will be returned instead. A dataset object can also be imported by setting the :attr:`Dataset.dict` attribute. 
:: data = tablib.Dataset() data.dict = [{'age': 90, 'first_name': 'Kenneth', 'last_name': 'Reitz'}] """ error_details = ( "Please check format documentation " "https://tablib.readthedocs.io/en/stable/formats.html#yaml" ) if not pickle: return if not isinstance(pickle, list): # sometimes pickle is a dict and len(pickle) returns True. # since we access index 0 we should check if the type is list raise UnsupportedFormat(error_details) # if list of rows if isinstance(pickle[0], list): self.wipe() for row in pickle: self.append(Row(row)) # if list of objects elif isinstance(pickle[0], dict): self.wipe() self.headers = list(pickle[0].keys()) for row in pickle: self.append(Row(list(row.values()))) else: raise UnsupportedFormat(error_details) dict = property(_get_dict, _set_dict) def _clean_col(self, col): """Prepares the given column for insert/append. `col` is not supposed to contain any header value. """ col = list(col) if len(col) == 1 and hasattr(col[0], '__call__'): col = list(map(col[0], self._data)) return col @property def height(self): """The number of rows currently in the :class:`Dataset`. Cannot be directly modified. """ return len(self._data) @property def width(self): """The number of columns currently in the :class:`Dataset`. Cannot be directly modified. """ try: return len(self._data[0]) except IndexError: try: return len(self.headers) except TypeError: return 0 def load(self, in_stream, format=None, **kwargs): """ Import `in_stream` to the :class:`Dataset` object using the `format`. `in_stream` can be a file-like object, a string, or a bytestring. :param \\*\\*kwargs: (optional) custom configuration to the format `import_set`. 
""" stream = normalize_input(in_stream) if not format: format = detect_format(stream) fmt = registry.get_format(format) if not hasattr(fmt, 'import_set'): raise UnsupportedFormat(f'Format {format} cannot be imported.') fmt.import_set(self, stream, **kwargs) return self def export(self, format, **kwargs): """ Export :class:`Dataset` object to `format`. :param \\*\\*kwargs: (optional) custom configuration to the format `export_set`. """ fmt = registry.get_format(format) if not hasattr(fmt, 'export_set'): raise UnsupportedFormat(f'Format {format} cannot be exported.') return fmt.export_set(self, **kwargs) # ---- # Rows # ---- def insert(self, index, row, tags=()): """Inserts a row to the :class:`Dataset` at the given index. Rows inserted must be the correct size (height or width). The default behaviour is to insert the given row to the :class:`Dataset` object at the given index. You can add :ref:`tags <tags>` to the row you are inserting. This gives you the ability to :method:`filter <Dataset.filter>` your :class:`Dataset` later. """ self._validate(row) if len(row) < self.width: for pos, func in self._dynamic_columns.items(): row = list(row) row.insert(pos, func(row)) self._data.insert(index, Row(row, tags=tags)) def rpush(self, row, tags=()): """Adds a row to the end of the :class:`Dataset`. See :method:`Dataset.insert` for additional documentation. """ self.insert(self.height, row=row, tags=tags) def lpush(self, row, tags=()): """Adds a row to the top of the :class:`Dataset`. See :method:`Dataset.insert` for additional documentation. """ self.insert(0, row=row, tags=tags) def append(self, row, tags=()): """Adds a row to the :class:`Dataset`. See :method:`Dataset.insert` for additional documentation. 
""" self.rpush(row, tags) def extend(self, rows, tags=()): """Adds a list of rows to the :class:`Dataset` using :method:`Dataset.append` """ for row in rows: self.append(row, tags) def lpop(self): """Removes and returns the first row of the :class:`Dataset`.""" cache = self[0] del self[0] return cache def rpop(self): """Removes and returns the last row of the :class:`Dataset`.""" cache = self[-1] del self[-1] return cache def pop(self): """Removes and returns the last row of the :class:`Dataset`.""" return self.rpop() def get(self, index): """Returns the row from the :class:`Dataset` at the given index.""" if isinstance(index, int): return self[index] raise TypeError('Row indices must be integers.') # ------- # Columns # ------- def insert_col(self, index, col=None, header=None): """Inserts a column to the :class:`Dataset` at the given index. Columns inserted must be the correct height. You can also insert a column of a single callable object, which will add a new column with the return values of the callable each as an item in the column. :: data.append_col(col=random.randint) If inserting a column, and :attr:`Dataset.headers` is set, the header attribute must be set, and will be considered the header for that row. See :ref:`dyncols` for an in-depth example. """ if col is None: col = [] # Callable Columns... if callable(col): self._dynamic_columns[self.width] = col col = list(map(col, self._data)) col = self._clean_col(col) self._validate(col=col) if self.headers: # pop the first item off, add to headers if not header: raise HeadersNeeded() # corner case - if header is set without data elif header and self.height == 0 and len(col): raise InvalidDimensions self.headers.insert(index, header) if self.height and self.width: for i, row in enumerate(self._data): row.insert(index, col[i]) self._data[i] = row else: self._data = [Row([row]) for row in col] def rpush_col(self, col, header=None): """Adds a column to the end of the :class:`Dataset`. 
See :method:`Dataset.insert` for additional documentation. """ self.insert_col(self.width, col, header=header) def lpush_col(self, col, header=None): """Adds a column to the top of the :class:`Dataset`. See :method:`Dataset.insert` for additional documentation. """ self.insert_col(0, col, header=header) def insert_separator(self, index, text='-'): """Adds a separator to :class:`Dataset` at given index.""" sep = (index, text) self._separators.append(sep) def append_separator(self, text='-'): """Adds a :ref:`separator <separators>` to the :class:`Dataset`.""" # change offsets if headers are or aren't defined if not self.headers: index = self.height if self.height else 0 else: index = (self.height + 1) if self.height else 1 self.insert_separator(index, text) def append_col(self, col, header=None): """Adds a column to the :class:`Dataset`. See :method:`Dataset.insert_col` for additional documentation. """ self.rpush_col(col, header) def get_col(self, index): """Returns the column from the :class:`Dataset` at the given index.""" return [row[index] for row in self._data] # ---- # Misc # ---- def add_formatter(self, col, handler): """Adds a formatter to the :class:`Dataset`. :param col: column to. Accepts index int, header str, or None to apply the formatter to all columns. :param handler: reference to callback function to execute against each cell value. """ if isinstance(col, str): if col in self.headers: col = self.headers.index(col) # get 'key' index from each data else: raise KeyError if col is None or col <= self.width: self._formatters.append((col, handler)) else: raise InvalidDatasetIndex return True def filter(self, tag): """Returns a new instance of the :class:`Dataset`, excluding any rows that do not contain the given :ref:`tags <tags>`. 
""" _dset = copy(self) _dset._data = [row for row in _dset._data if row.has_tag(tag)] return _dset def sort(self, col, reverse=False): """Sort a :class:`Dataset` by a specific column, given string (for header) or integer (for column index). The order can be reversed by setting ``reverse`` to ``True``. Returns a new :class:`Dataset` instance where columns have been sorted. """ if isinstance(col, str): if not self.headers: raise HeadersNeeded _sorted = sorted(self.dict, key=itemgetter(col), reverse=reverse) _dset = Dataset(headers=self.headers, title=self.title) for item in _sorted: row = [item[key] for key in self.headers] _dset.append(row=row) else: if self.headers: col = self.headers[col] _sorted = sorted(self.dict, key=itemgetter(col), reverse=reverse) _dset = Dataset(headers=self.headers, title=self.title) for item in _sorted: if self.headers: row = [item[key] for key in self.headers] else: row = item _dset.append(row=row) return _dset def _transpose_with_headers(self): """Transpose a :class:`Dataset`, turning rows into columns and vice versa, returning a new ``Dataset`` instance. The first row of the original instance becomes the new header row.""" _dset = Dataset() # The first element of the headers stays in the headers, # it is our "hinge" on which we rotate the data new_headers = [self.headers[0]] + self[self.headers[0]] _dset.headers = new_headers for index, column in enumerate(self.headers): if column == self.headers[0]: # It's in the headers, so skip it continue # Adding the column name as now they're a regular column # Use `get_col(index)` in case there are repeated values row_data = [column] + self.get_col(index) row_data = Row(row_data) _dset.append(row=row_data) return _dset def _transpose_without_headers(self): """Transpose a :class:`Dataset`, turning rows into columns and vice versa, returning a new ``Dataset`` instance. 
This instance should not have headers, or the dimension would be invalid.""" _dset = Dataset() # Add columns as rows in new instance for index in range(len(self._data[0])): row_data = self.get_col(index) _dset.append(row=row_data) return _dset def transpose(self): """Transpose a :class:`Dataset`, turning rows into columns and vice versa, returning a new ``Dataset`` instance. If the instance has headers, the first row of the original instance becomes the new header row.""" # Don't transpose if there is no data if not self: return if self.headers is None: return self._transpose_without_headers() else: return self._transpose_with_headers() def stack(self, other): """Stack two :class:`Dataset` instances together by joining at the row level, and return new combined ``Dataset`` instance.""" if not isinstance(other, Dataset): return if self.width != other.width: raise InvalidDimensions # Copy the source data _dset = copy(self) rows_to_stack = list(_dset._data) other_rows = list(other._data) rows_to_stack.extend(other_rows) _dset._data = rows_to_stack return _dset def stack_cols(self, other): """Stack two :class:`Dataset` instances together by joining at the column level, and return a new combined ``Dataset`` instance. 
If either ``Dataset`` has headers set, than the other must as well.""" if not isinstance(other, Dataset): return if self.headers or other.headers: if not self.headers or not other.headers: raise HeadersNeeded if self.height != other.height: raise InvalidDimensions try: new_headers = self.headers + other.headers except TypeError: new_headers = None _dset = Dataset() for column in self.headers: _dset.append_col(col=self[column]) for column in other.headers: _dset.append_col(col=other[column]) _dset.headers = new_headers return _dset def remove_duplicates(self): """Removes all duplicate rows from the :class:`Dataset` object while maintaining the original order.""" seen = set() self._data[:] = [ row for row in self._data if not (tuple(row) in seen or seen.add(tuple(row))) ] def wipe(self): """Removes all content and headers from the :class:`Dataset` object.""" self._data = [] self.__headers = None def subset(self, rows=None, cols=None): """Returns a new instance of the :class:`Dataset`, including only specified rows and columns. """ # Don't return if no data if not self: return if rows is None: rows = list(range(self.height)) if cols is None: cols = list(self.headers) # filter out impossible rows and columns rows = [row for row in rows if row in range(self.height)] cols = [header for header in cols if header in self.headers] _dset = Dataset() # filtering rows and columns _dset.headers = list(cols) _dset._data = [] for row_no, row in enumerate(self._data): data_row = [] for key in _dset.headers: if key in self.headers: pos = self.headers.index(key) data_row.append(row[pos]) else: raise KeyError if row_no in rows: _dset.append(row=Row(data_row)) return _dset
Dataset
python
astropy__astropy
astropy/time/formats.py
{ "start": 76795, "end": 78513 }
class ____(TimeString): """ Base class to support string Besselian and Julian epoch dates such as 'B1950.0' or 'J2000.0' respectively. """ _default_scale = "tt" # As of astropy 3.2, this is no longer 'utc'. def set_jds(self, val1, val2): epoch_prefix = self.epoch_prefix # Be liberal in what we accept: convert bytes to ascii. to_string = ( str if val1.dtype.kind == "U" else lambda x: str(x.item(), encoding="ascii") ) iterator = np.nditer( [val1, None], op_dtypes=[val1.dtype, np.double], flags=["zerosize_ok"] ) for val, years in iterator: try: time_str = to_string(val) epoch_type, year_str = time_str[0], time_str[1:] year = float(year_str) if epoch_type.upper() != epoch_prefix: raise ValueError except (IndexError, ValueError, UnicodeEncodeError): raise ValueError(f"Time {val} does not match {self.name} format") else: years[...] = year self._check_scale(self._scale) # validate scale. epoch_to_jd = getattr(erfa, self.epoch_to_jd) jd1, jd2 = epoch_to_jd(iterator.operands[-1]) self.jd1, self.jd2 = day_frac(jd1, jd2) @property def value(self): jd_to_epoch = getattr(erfa, self.jd_to_epoch) years = jd_to_epoch(self.jd1, self.jd2) # Use old-style format since it is a factor of 2 faster str_fmt = self.epoch_prefix + "%." + str(self.precision) + "f" outs = [str_fmt % year for year in years.flat] return np.array(outs).reshape(self.jd1.shape)
TimeEpochDateString
python
dagster-io__dagster
python_modules/dagster/dagster/components/resolved/model.py
{ "start": 568, "end": 1106 }
class ____(BaseModel): """pydantic BaseModel configured with recommended default settings for use with the Resolved framework. Extra fields are disallowed when instantiating this model to help catch errors earlier. Example: .. code-block:: python import dagster as dg class MyModel(dg.Resolvable, dg.Model): name: str age: int # raises exception MyModel(name="John", age=30, other="field") """ model_config = ConfigDict(extra="forbid") @dataclass
Model
python
ansible__ansible
lib/ansible/modules/hostname.py
{ "start": 25561, "end": 25686 }
class ____(Hostname): platform = 'Linux' distribution = 'Altlinux' strategy_class = RedHatStrategy
ALTLinuxHostname
python
tornadoweb__tornado
tornado/test/httpclient_test.py
{ "start": 2105, "end": 2220 }
class ____(RequestHandler): def get(self): self.finish(self.request.headers["Authorization"])
AuthHandler
python
dagster-io__dagster
examples/docs_snippets/docs_snippets/concepts/resources/pythonic_resources.py
{ "start": 10123, "end": 23026 }
class ____: def connect(self) -> Connection: return Connection() def create_engine(*args, **kwargs): return Engine() def raw_github_resource_dep() -> None: # start_raw_github_resource_dep import dagster as dg class DBResource(dg.ConfigurableResource): engine: dg.ResourceDependency[Engine] def query(self, query: str): with self.engine.connect() as conn: return conn.execute(query) engine = create_engine(...) @dg.definitions def resources(): return dg.Definitions( resources={"db": DBResource(engine=engine)}, ) # end_raw_github_resource_dep def resource_adapter() -> None: # start_resource_adapter import dagster as dg # Old code, interface cannot be changed for back-compat purposes class Writer: def __init__(self, prefix: str): self._prefix = prefix def output(self, text: str) -> None: print(self._prefix + text) @dg.resource(config_schema={"prefix": str}) def writer_resource(context): prefix = context.resource_config["prefix"] return Writer(prefix) # New adapter layer class WriterResource(dg.ConfigurableLegacyResourceAdapter): prefix: str @property def wrapped_resource(self) -> dg.ResourceDefinition: return writer_resource @dg.asset def my_asset(writer: Writer): writer.output("hello, world!") @dg.definitions def resources(): return dg.Definitions(resources={"writer": WriterResource(prefix="greeting: ")}) # end_resource_adapter def io_adapter() -> None: # start_io_adapter import dagster as dg import os # Old code, interface cannot be changed for back-compat purposes class OldFileIOManager(dg.IOManager): def __init__(self, base_path: str): self.base_path = base_path def handle_output(self, context: dg.OutputContext, obj): with open( os.path.join(self.base_path, context.step_key, context.name), "w" ) as fd: fd.write(obj) def load_input(self, context: dg.InputContext): with open( os.path.join( self.base_path, context.upstream_output.step_key, # type: ignore context.upstream_output.name, # type: ignore ), ) as fd: return fd.read() @dg.io_manager(config_schema={"base_path": 
str}) def old_file_io_manager(context): base_path = context.resource_config["base_path"] return OldFileIOManager(base_path) # New adapter layer class MyIOManager(dg.ConfigurableLegacyIOManagerAdapter): base_path: str @property def wrapped_io_manager(self) -> dg.IOManagerDefinition: return old_file_io_manager @dg.definitions def resources(): return dg.Definitions( resources={ "dg.io_manager": MyIOManager(base_path="/tmp/"), }, ) # end_io_adapter def impl_details_resolve() -> None: # start_impl_details_resolve import dagster as dg class CredentialsResource(dg.ConfigurableResource): username: str password: str class FileStoreBucket(dg.ConfigurableResource): credentials: CredentialsResource region: str def write(self, data: str): # In this context, `self.credentials` is ensured to # be a CredentialsResource with valid values for # `username` and `password` get_filestore_client( username=self.credentials.username, password=self.credentials.password, region=self.region, ).write(data) # unconfigured_credentials_resource is typed as PartialResource[CredentialsResource] unconfigured_credentials_resource = CredentialsResource.configure_at_launch() # FileStoreBucket constructor accepts either a CredentialsResource or a # PartialResource[CredentialsResource] for the `credentials` argument bucket = FileStoreBucket( credentials=unconfigured_credentials_resource, region="us-east-1", ) # end_impl_details_resolve def write_csv(path: str, obj: Any): pass def read_csv(path: str): pass def new_io_manager() -> None: # start_new_io_manager import dagster as dg class MyIOManager(dg.ConfigurableIOManager): root_path: str def _get_path(self, asset_key: dg.AssetKey) -> str: return self.root_path + "/".join(asset_key.path) def handle_output(self, context: dg.OutputContext, obj): write_csv(self._get_path(context.asset_key), obj) def load_input(self, context: dg.InputContext): return read_csv(self._get_path(context.asset_key)) @dg.definitions def resources(): return dg.Definitions( 
resources={"dg.io_manager": MyIOManager(root_path="/tmp/")}, ) # end_new_io_manager def raw_github_resource_factory() -> None: # start_raw_github_resource_factory import dagster as dg class GitHubResource(dg.ConfigurableResourceFactory[GitHub]): access_token: str def create_resource(self, _context) -> GitHub: return GitHub(self.access_token) @dg.asset def public_github_repos(github: dg.Resource[GitHub]): return github.organization("dagster-io").repositories() @dg.definitions def resources(): return dg.Definitions( resources={ "github": GitHubResource(access_token=dg.EnvVar("GITHUB_ACCESS_TOKEN")) }, ) # end_raw_github_resource_factory def new_resource_testing_with_context(): # start_new_resource_testing_with_context import dagster as dg from typing import Optional class MyContextResource(dg.ConfigurableResource[GitHub]): base_path: Optional[str] = None def effective_base_path(self) -> str: if self.base_path: return self.base_path instance = self.get_resource_context().instance assert instance return instance.storage_directory() # end_new_resource_testing_with_context # start_test_my_context_resource def test_my_context_resource(): with dg.DagsterInstance.ephemeral() as instance: context = dg.build_init_resource_context(instance=instance) assert ( MyContextResource(base_path=None) .with_resource_context(context) .effective_base_path() == instance.storage_directory() ) # end_test_my_context_resource def with_state_example() -> None: # start_with_state_example import dagster as dg import requests from pydantic import PrivateAttr class MyClientResource(dg.ConfigurableResource): username: str password: str _api_token: str = PrivateAttr() def setup_for_execution(self, context: dg.InitResourceContext) -> None: # Fetch and set up an API token based on the username and password self._api_token = requests.get( "https://my-api.com/token", auth=(self.username, self.password) ).text def get_all_users(self): return requests.get( "https://my-api.com/users", 
headers={"Authorization": self._api_token}, ) @dg.asset def my_asset(client: MyClientResource): return client.get_all_users() # end_with_state_example def with_complex_state_example() -> None: # start_with_complex_state_example import dagster as dg from contextlib import contextmanager from pydantic import PrivateAttr class DBConnection: ... def query(self, body: str): ... @contextmanager # type: ignore def get_database_connection(username: str, password: str): ... class MyClientResource(dg.ConfigurableResource): username: str password: str _db_connection: DBConnection = PrivateAttr() @contextmanager def yield_for_execution(self, context: dg.InitResourceContext): # keep connection open for the duration of the execution with get_database_connection(self.username, self.password) as conn: # set up the connection attribute so it can be used in the execution self._db_connection = conn # yield, allowing execution to occur yield self def query(self, body: str): return self._db_connection.query(body) @dg.asset def my_asset(client: MyClientResource): client.query("SELECT * FROM my_table") # end_with_complex_state_example def new_resource_testing_with_state_ops() -> None: # start_new_resource_testing_with_state_ops import dagster as dg from unittest import mock class MyClient: ... def query(self, body: str): ... 
class MyClientResource(dg.ConfigurableResource): username: str password: str def get_client(self): return MyClient(self.username, self.password) @dg.op def my_op(client: MyClientResource): return client.get_client().query("SELECT * FROM my_table") def test_my_op(): class FakeClient: def query(self, body: str): assert body == "SELECT * FROM my_table" return "my_result" mocked_client_resource = mock.Mock() mocked_client_resource.get_client.return_value = FakeClient() assert my_op(mocked_client_resource) == "my_result" # end_new_resource_testing_with_state_ops def new_resource_on_sensor() -> None: # start_new_resource_on_sensor import dagster as dg import requests class UsersAPI(dg.ConfigurableResource): url: str def fetch_users(self) -> list[str]: return requests.get(self.url).json() @dg.job def process_user(): ... @dg.sensor(job=process_user) def process_new_users_sensor( context: dg.SensorEvaluationContext, users_api: UsersAPI, ): last_user = int(context.cursor) if context.cursor else 0 users = users_api.fetch_users() num_users = len(users) for user_id in users[last_user:]: yield dg.RunRequest( run_key=user_id, tags={"user_id": user_id}, ) context.update_cursor(str(num_users)) # end_new_resource_on_sensor # start_new_resource_on_sensor_defs @dg.definitions def resources(): return dg.Definitions( resources={"users_api": UsersAPI(url="https://my-api.com/users")}, ) # end_new_resource_on_sensor_defs # start_test_resource_on_sensor import dagster as dg def test_process_new_users_sensor(): class FakeUsersAPI: def fetch_users(self) -> list[str]: return ["1", "2", "3"] context = dg.build_sensor_context() run_requests = process_new_users_sensor(context, users_api=FakeUsersAPI()) assert len(run_requests) == 3 # end_test_resource_on_sensor def new_resource_on_schedule() -> None: # start_new_resource_on_schedule import dagster as dg from datetime import datetime class DateFormatter(dg.ConfigurableResource): format: str def strftime(self, dt: datetime) -> str: return 
dt.strftime(self.format) @dg.job def process_data(): ... @dg.schedule(job=process_data, cron_schedule="* * * * *") def process_data_schedule( context: dg.ScheduleEvaluationContext, date_formatter: DateFormatter, ): formatted_date = date_formatter.strftime(context.scheduled_execution_time) return dg.RunRequest( run_key=None, tags={"date": formatted_date}, ) # end_new_resource_on_schedule # start_new_resource_on_schedule_defs @dg.definitions def resources(): return dg.Definitions( resources={"date_formatter": DateFormatter(format="%Y-%m-%d")}, ) # end_new_resource_on_schedule_defs # start_test_resource_on_schedule import dagster as dg def test_process_data_schedule(): context = dg.build_schedule_context( scheduled_execution_time=datetime.datetime(2020, 1, 1) ) run_request = process_data_schedule( context, date_formatter=DateFormatter(format="%Y-%m-%d") ) assert ( run_request.run_config["ops"]["fetch_data"]["config"]["date"] == "2020-01-01" ) # end_test_resource_on_schedule
Engine
python
python-attrs__attrs
typing-examples/baseline.py
{ "start": 1066, "end": 1160 }
class ____: num: int = attrs.field(validator=attrs.validators.ge(0)) @attrs.define
Validated
python
pandas-dev__pandas
pandas/tests/arithmetic/test_timedelta64.py
{ "start": 53513, "end": 81512 }
class ____: # Tests for timedelta64[ns] # __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__ # ------------------------------------------------------------------ # Multiplication # organized with scalar others first, then array-like def test_td64arr_mul_int(self, box_with_array): idx = TimedeltaIndex(np.arange(5, dtype="int64")) idx = tm.box_expected(idx, box_with_array) result = idx * 1 tm.assert_equal(result, idx) result = 1 * idx tm.assert_equal(result, idx) def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array): rng = timedelta_range("1 days", "10 days", name="foo") rng = tm.box_expected(rng, box_with_array) msg = "|".join( [ "argument must be an integer", "cannot use operands with types dtype", "Cannot multiply with", ] ) with pytest.raises(TypeError, match=msg): rng * two_hours def test_tdi_mul_int_array_zerodim(self, box_with_array): rng5 = np.arange(5, dtype="int64") idx = TimedeltaIndex(rng5) expected = TimedeltaIndex(rng5 * 5) idx = tm.box_expected(idx, box_with_array) expected = tm.box_expected(expected, box_with_array) result = idx * np.array(5, dtype="int64") tm.assert_equal(result, expected) def test_tdi_mul_int_array(self, box_with_array): rng5 = np.arange(5, dtype="int64") idx = TimedeltaIndex(rng5) expected = TimedeltaIndex(rng5**2) idx = tm.box_expected(idx, box_with_array) expected = tm.box_expected(expected, box_with_array) result = idx * rng5 tm.assert_equal(result, expected) def test_tdi_mul_int_series(self, box_with_array): box = box_with_array xbox = Series if box in [Index, tm.to_array, pd.array] else box idx = TimedeltaIndex(np.arange(5, dtype="int64")) expected = TimedeltaIndex(np.arange(5, dtype="int64") ** 2) idx = tm.box_expected(idx, box) expected = tm.box_expected(expected, xbox) result = idx * Series(np.arange(5, dtype="int64")) tm.assert_equal(result, expected) def test_tdi_mul_float_series(self, box_with_array): box = box_with_array xbox = Series if box in [Index, tm.to_array, pd.array] else box 
idx = TimedeltaIndex(np.arange(5, dtype="int64")) idx = tm.box_expected(idx, box) rng5f = np.arange(5, dtype="float64") expected = TimedeltaIndex(rng5f * (rng5f + 1.0)) expected = tm.box_expected(expected, xbox) result = idx * Series(rng5f + 1.0) tm.assert_equal(result, expected) # TODO: Put Series/DataFrame in others? @pytest.mark.parametrize( "other", [ np.arange(1, 11), Index(np.arange(1, 11), np.int64), Index(range(1, 11), np.uint64), Index(range(1, 11), np.float64), pd.RangeIndex(1, 11), ], ids=lambda x: type(x).__name__, ) def test_tdi_rmul_arraylike(self, other, box_with_array): box = box_with_array tdi = TimedeltaIndex(["1 Day"] * 10) expected = timedelta_range("1 days", "10 days")._with_freq(None) tdi = tm.box_expected(tdi, box) xbox = get_upcast_box(tdi, other) expected = tm.box_expected(expected, xbox) result = other * tdi tm.assert_equal(result, expected) commute = tdi * other tm.assert_equal(commute, expected) def test_td64arr_mul_bool_scalar_raises(self, box_with_array): # GH#58054 ser = Series(np.arange(5) * timedelta(hours=1), dtype="m8[ns]") obj = tm.box_expected(ser, box_with_array) msg = r"Cannot multiply 'timedelta64\[ns\]' by bool" with pytest.raises(TypeError, match=msg): True * obj with pytest.raises(TypeError, match=msg): obj * True with pytest.raises(TypeError, match=msg): np.True_ * obj with pytest.raises(TypeError, match=msg): obj * np.True_ @pytest.mark.parametrize( "dtype", [ bool, "boolean", pytest.param("bool[pyarrow]", marks=td.skip_if_no("pyarrow")), ], ) def test_td64arr_mul_bool_raises(self, dtype, box_with_array): # GH#58054 ser = Series(np.arange(5) * timedelta(hours=1), dtype="m8[ns]") obj = tm.box_expected(ser, box_with_array) other = Series(np.arange(5) < 0.5, dtype=dtype) other = tm.box_expected(other, box_with_array) msg = r"Cannot multiply 'timedelta64\[ns\]' by bool" with pytest.raises(TypeError, match=msg): obj * other msg2 = msg.replace("rmul", "mul") if dtype == "bool[pyarrow]": # We go through ArrowEA.__mul__ which 
gives a different message msg2 = ( r"operation 'mul' not supported for dtype 'bool\[pyarrow\]' " r"with dtype 'timedelta64\[ns\]'" ) with pytest.raises(TypeError, match=msg2): other * obj @pytest.mark.parametrize( "dtype", [ "Int64", "Float64", pytest.param("int64[pyarrow]", marks=td.skip_if_no("pyarrow")), ], ) def test_td64arr_mul_masked(self, dtype, box_with_array): ser = Series(np.arange(5) * timedelta(hours=1), dtype="m8[ns]") obj = tm.box_expected(ser, box_with_array) other = Series(np.arange(5), dtype=dtype) other = tm.box_expected(other, box_with_array) expected = Series([Timedelta(hours=n**2) for n in range(5)]) expected = tm.box_expected(expected, box_with_array) if dtype == "int64[pyarrow]": expected = expected.astype("duration[ns][pyarrow]") result = obj * other tm.assert_equal(result, expected) result = other * obj tm.assert_equal(result, expected) # ------------------------------------------------------------------ # __div__, __rdiv__ def test_td64arr_div_nat_invalid(self, box_with_array): # don't allow division by NaT (maybe could in the future) rng = timedelta_range("1 days", "10 days", name="foo") rng = tm.box_expected(rng, box_with_array) with pytest.raises(TypeError, match="unsupported operand type"): rng / NaT with pytest.raises(TypeError, match="Cannot divide NaTType by"): NaT / rng dt64nat = np.datetime64("NaT", "ns") msg = "|".join( [ # 'divide' on npdev as of 2021-12-18 "ufunc '(true_divide|divide)' cannot use operands", "cannot perform __r?truediv__", "Cannot divide datetime64 by TimedeltaArray", ] ) with pytest.raises(TypeError, match=msg): rng / dt64nat with pytest.raises(TypeError, match=msg): dt64nat / rng def test_td64arr_div_td64nat(self, box_with_array): # GH#23829 box = box_with_array xbox = np.ndarray if box is pd.array else box rng = timedelta_range("1 days", "10 days") rng = tm.box_expected(rng, box) other = np.timedelta64("NaT") expected = np.array([np.nan] * 10) expected = tm.box_expected(expected, xbox) result = rng / other 
tm.assert_equal(result, expected) result = other / rng tm.assert_equal(result, expected) def test_td64arr_div_int(self, box_with_array): idx = TimedeltaIndex(np.arange(5, dtype="int64")) idx = tm.box_expected(idx, box_with_array) result = idx / 1 tm.assert_equal(result, idx) with pytest.raises(TypeError, match="Cannot divide"): # GH#23829 1 / idx def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array): # GH#20088, GH#22163 ensure DataFrame returns correct dtype box = box_with_array xbox = np.ndarray if box is pd.array else box rng = timedelta_range("1 days", "10 days", name="foo") expected = Index((np.arange(10) + 1) * 12, dtype=np.float64, name="foo") rng = tm.box_expected(rng, box) expected = tm.box_expected(expected, xbox) result = rng / two_hours tm.assert_equal(result, expected) result = two_hours / rng expected = 1 / expected tm.assert_equal(result, expected) @pytest.mark.parametrize("m", [1, 3, 10]) @pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"]) def test_td64arr_div_td64_scalar(self, m, unit, box_with_array): box = box_with_array xbox = np.ndarray if box is pd.array else box ser = Series([Timedelta(days=59)] * 3) ser[2] = np.nan flat = ser ser = tm.box_expected(ser, box) # op expected = Series([x / np.timedelta64(m, unit) for x in flat]) expected = tm.box_expected(expected, xbox) result = ser / np.timedelta64(m, unit) tm.assert_equal(result, expected) # reverse op expected = Series([Timedelta(np.timedelta64(m, unit)) / x for x in flat]) expected = tm.box_expected(expected, xbox) result = np.timedelta64(m, unit) / ser tm.assert_equal(result, expected) def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours, box_with_array): box = box_with_array xbox = np.ndarray if box is pd.array else box rng = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo") expected = Index([12, np.nan, 24], dtype=np.float64, name="foo") rng = tm.box_expected(rng, box) expected = tm.box_expected(expected, xbox) result = rng / two_hours 
tm.assert_equal(result, expected) result = two_hours / rng expected = 1 / expected tm.assert_equal(result, expected) def test_td64arr_div_td64_ndarray(self, box_with_array): # GH#22631 box = box_with_array xbox = np.ndarray if box is pd.array else box rng = TimedeltaIndex(["1 days", NaT, "2 days"]) expected = Index([12, np.nan, 24], dtype=np.float64) rng = tm.box_expected(rng, box) expected = tm.box_expected(expected, xbox) other = np.array([2, 4, 2], dtype="m8[h]") result = rng / other tm.assert_equal(result, expected) result = rng / tm.box_expected(other, box) tm.assert_equal(result, expected) result = rng / other.astype(object) tm.assert_equal(result, expected.astype(object)) result = rng / list(other) tm.assert_equal(result, expected) # reversed op expected = 1 / expected result = other / rng tm.assert_equal(result, expected) result = tm.box_expected(other, box) / rng tm.assert_equal(result, expected) result = other.astype(object) / rng tm.assert_equal(result, expected) result = list(other) / rng tm.assert_equal(result, expected) def test_tdarr_div_length_mismatch(self, box_with_array): rng = TimedeltaIndex(["1 days", NaT, "2 days"]) mismatched = [1, 2, 3, 4] rng = tm.box_expected(rng, box_with_array) msg = "Cannot divide vectors|Unable to coerce to Series" for obj in [mismatched, mismatched[:2]]: # one shorter, one longer for other in [obj, np.array(obj), Index(obj)]: with pytest.raises(ValueError, match=msg): rng / other with pytest.raises(ValueError, match=msg): other / rng def test_td64_div_object_mixed_result(self, box_with_array): # Case where we having a NaT in the result inseat of timedelta64("NaT") # is misleading orig = timedelta_range("1 Day", periods=3).insert(1, NaT) tdi = tm.box_expected(orig, box_with_array, transpose=False) other = np.array([orig[0], 1.5, 2.0, orig[2]], dtype=object) other = tm.box_expected(other, box_with_array, transpose=False) res = tdi / other expected = Index([1.0, np.timedelta64("NaT", "ns"), orig[0], 1.5], dtype=object) 
expected = tm.box_expected(expected, box_with_array, transpose=False) if isinstance(expected, NumpyExtensionArray): expected = expected.to_numpy() tm.assert_equal(res, expected) if box_with_array is DataFrame: # We have an np.timedelta64(NaT), not pd.NaT assert isinstance(res.iloc[1, 0], np.timedelta64) res = tdi // other expected = Index([1, np.timedelta64("NaT", "ns"), orig[0], 1], dtype=object) expected = tm.box_expected(expected, box_with_array, transpose=False) if isinstance(expected, NumpyExtensionArray): expected = expected.to_numpy() tm.assert_equal(res, expected) if box_with_array is DataFrame: # We have an np.timedelta64(NaT), not pd.NaT assert isinstance(res.iloc[1, 0], np.timedelta64) # ------------------------------------------------------------------ # __floordiv__, __rfloordiv__ @pytest.mark.skipif(WASM, reason="no fp exception support in wasm") def test_td64arr_floordiv_td64arr_with_nat(self, box_with_array): # GH#35529 box = box_with_array xbox = np.ndarray if box is pd.array else box left = Series([1000, 222330, 30], dtype="timedelta64[ns]") right = Series([1000, 222330, None], dtype="timedelta64[ns]") left = tm.box_expected(left, box) right = tm.box_expected(right, box) expected = np.array([1.0, 1.0, np.nan], dtype=np.float64) expected = tm.box_expected(expected, xbox) with tm.maybe_produces_warning( RuntimeWarning, box is pd.array, check_stacklevel=False ): result = left // right tm.assert_equal(result, expected) # case that goes through __rfloordiv__ with arraylike with tm.maybe_produces_warning( RuntimeWarning, box is pd.array, check_stacklevel=False ): result = np.asarray(left) // right tm.assert_equal(result, expected) @pytest.mark.filterwarnings("ignore:invalid value encountered:RuntimeWarning") def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td): # GH#18831, GH#19125 box = box_with_array xbox = np.ndarray if box is pd.array else box td = Timedelta("5m3s") # i.e. 
(scalar_td - 1sec) / 2 td1 = Series([td, td, NaT], dtype="m8[ns]") td1 = tm.box_expected(td1, box, transpose=False) expected = Series([0, 0, np.nan]) expected = tm.box_expected(expected, xbox, transpose=False) result = td1 // scalar_td tm.assert_equal(result, expected) # Reversed op expected = Series([2, 2, np.nan]) expected = tm.box_expected(expected, xbox, transpose=False) result = scalar_td // td1 tm.assert_equal(result, expected) # same thing buts let's be explicit about calling __rfloordiv__ result = td1.__rfloordiv__(scalar_td) tm.assert_equal(result, expected) def test_td64arr_floordiv_int(self, box_with_array): idx = TimedeltaIndex(np.arange(5, dtype="int64")) idx = tm.box_expected(idx, box_with_array) result = idx // 1 tm.assert_equal(result, idx) pattern = "floor_divide cannot use operands|Cannot divide int by Timedelta*" with pytest.raises(TypeError, match=pattern): 1 // idx # ------------------------------------------------------------------ # mod, divmod # TODO: operations with timedelta-like arrays, numeric arrays, # reversed ops def test_td64arr_mod_tdscalar( self, performance_warning, box_with_array, three_days ): tdi = timedelta_range("1 Day", "9 days") tdarr = tm.box_expected(tdi, box_with_array) expected = TimedeltaIndex(["1 Day", "2 Days", "0 Days"] * 3) expected = tm.box_expected(expected, box_with_array) if isinstance(three_days, offsets.Day): msg = "unsupported operand type" with pytest.raises(TypeError, match=msg): tdarr % three_days with pytest.raises(TypeError, match=msg): divmod(tdarr, three_days) with pytest.raises(TypeError, match=msg): tdarr // three_days return result = tdarr % three_days tm.assert_equal(result, expected) if box_with_array is DataFrame and isinstance(three_days, pd.DateOffset): # TODO: making expected be object here a result of DataFrame.__divmod__ # being defined in a naive way that does not dispatch to the underlying # array's __divmod__ expected = expected.astype(object) else: performance_warning = False with 
tm.assert_produces_warning(performance_warning): result = divmod(tdarr, three_days) tm.assert_equal(result[1], expected) tm.assert_equal(result[0], tdarr // three_days) def test_td64arr_mod_int(self, box_with_array): tdi = timedelta_range("1 ns", "10 ns", periods=10) tdarr = tm.box_expected(tdi, box_with_array) expected = TimedeltaIndex(["1 ns", "0 ns"] * 5) expected = tm.box_expected(expected, box_with_array) result = tdarr % 2 tm.assert_equal(result, expected) msg = "Cannot divide int by" with pytest.raises(TypeError, match=msg): 2 % tdarr result = divmod(tdarr, 2) tm.assert_equal(result[1], expected) tm.assert_equal(result[0], tdarr // 2) def test_td64arr_rmod_tdscalar(self, box_with_array, three_days): tdi = timedelta_range("1 Day", "9 days") tdarr = tm.box_expected(tdi, box_with_array) expected = ["0 Days", "1 Day", "0 Days"] + ["3 Days"] * 6 expected = TimedeltaIndex(expected) expected = tm.box_expected(expected, box_with_array) if isinstance(three_days, offsets.Day): msg = "Cannot divide Day by TimedeltaArray" with pytest.raises(TypeError, match=msg): three_days % tdarr return result = three_days % tdarr tm.assert_equal(result, expected) result = divmod(three_days, tdarr) tm.assert_equal(result[1], expected) tm.assert_equal(result[0], three_days // tdarr) # ------------------------------------------------------------------ # Operations with invalid others def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td): td1 = Series([timedelta(minutes=5, seconds=3)] * 3) td1.iloc[2] = np.nan td1 = tm.box_expected(td1, box_with_array) # check that we are getting a TypeError # with 'operate' (from core/ops.py) for the ops that are not # defined pattern = "operate|unsupported|cannot|not supported" with pytest.raises(TypeError, match=pattern): td1 * scalar_td with pytest.raises(TypeError, match=pattern): scalar_td * td1 def test_td64arr_mul_too_short_raises(self, box_with_array): idx = TimedeltaIndex(np.arange(5, dtype="int64")) idx = tm.box_expected(idx, 
box_with_array) msg = "|".join( [ "cannot use operands with types dtype", "Cannot multiply with unequal lengths", "Unable to coerce to Series", ] ) with pytest.raises(TypeError, match=msg): # length check before dtype check idx * idx[:3] with pytest.raises(ValueError, match=msg): idx * np.array([1, 2]) def test_td64arr_mul_td64arr_raises(self, box_with_array): idx = TimedeltaIndex(np.arange(5, dtype="int64")) idx = tm.box_expected(idx, box_with_array) msg = "cannot use operands with types dtype" with pytest.raises(TypeError, match=msg): idx * idx # ------------------------------------------------------------------ # Operations with numeric others def test_td64arr_mul_numeric_scalar(self, box_with_array, one): # GH#4521 # divide/multiply by integers tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") expected = Series(["-59 Days", "-59 Days", "NaT"], dtype="timedelta64[ns]") tdser = tm.box_expected(tdser, box_with_array) expected = tm.box_expected(expected, box_with_array) result = tdser * (-one) tm.assert_equal(result, expected) result = (-one) * tdser tm.assert_equal(result, expected) expected = Series(["118 Days", "118 Days", "NaT"], dtype="timedelta64[ns]") expected = tm.box_expected(expected, box_with_array) result = tdser * (2 * one) tm.assert_equal(result, expected) result = (2 * one) * tdser tm.assert_equal(result, expected) @pytest.mark.parametrize("two", [2, 2.0, np.array(2), np.array(2.0)]) def test_td64arr_div_numeric_scalar(self, box_with_array, two): # GH#4521 # divide/multiply by integers tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]") tdser = tm.box_expected(tdser, box_with_array) expected = tm.box_expected(expected, box_with_array) result = tdser / two tm.assert_equal(result, expected) with pytest.raises(TypeError, match="Cannot divide"): two / tdser @pytest.mark.parametrize("two", [2, 2.0, np.array(2), np.array(2.0)]) def 
test_td64arr_floordiv_numeric_scalar(self, box_with_array, two): tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]") tdser = tm.box_expected(tdser, box_with_array) expected = tm.box_expected(expected, box_with_array) result = tdser // two tm.assert_equal(result, expected) with pytest.raises(TypeError, match="Cannot divide"): two // tdser @pytest.mark.parametrize( "klass", [np.array, Index, Series], ids=lambda x: x.__name__, ) def test_td64arr_rmul_numeric_array( self, box_with_array, klass, any_real_numpy_dtype, ): # GH#4521 # divide/multiply by integers vector = klass([20, 30, 40]) tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") vector = vector.astype(any_real_numpy_dtype) expected = Series(["1180 Days", "1770 Days", "NaT"], dtype="timedelta64[ns]") tdser = tm.box_expected(tdser, box_with_array) xbox = get_upcast_box(tdser, vector) expected = tm.box_expected(expected, xbox) result = tdser * vector tm.assert_equal(result, expected) result = vector * tdser tm.assert_equal(result, expected) @pytest.mark.parametrize( "klass", [np.array, Index, Series], ids=lambda x: x.__name__, ) def test_td64arr_div_numeric_array( self, box_with_array, klass, any_real_numpy_dtype ): # GH#4521 # divide/multiply by integers vector = klass([20, 30, 40]) tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") vector = vector.astype(any_real_numpy_dtype) expected = Series(["2.95D", "1D 23h 12m", "NaT"], dtype="timedelta64[ns]") tdser = tm.box_expected(tdser, box_with_array) xbox = get_upcast_box(tdser, vector) expected = tm.box_expected(expected, xbox) result = tdser / vector tm.assert_equal(result, expected) pattern = "|".join( [ "true_divide'? 
cannot use operands", "cannot perform __div__", "cannot perform __truediv__", "unsupported operand", "Cannot divide", "ufunc 'divide' cannot use operands with types", ] ) with pytest.raises(TypeError, match=pattern): vector / tdser result = tdser / vector.astype(object) if box_with_array is DataFrame: expected = [tdser.iloc[0, n] / vector[n] for n in range(len(vector))] expected = tm.box_expected(expected, xbox).astype(object) expected[2] = expected[2].fillna(np.timedelta64("NaT", "ns")) else: expected = [tdser[n] / vector[n] for n in range(len(tdser))] expected = [ x if x is not NaT else np.timedelta64("NaT", "ns") for x in expected ] if xbox is tm.to_array: expected = tm.to_array(expected).astype(object) else: expected = xbox(expected, dtype=object) tm.assert_equal(result, expected) with pytest.raises(TypeError, match=pattern): vector.astype(object) / tdser def test_td64arr_mul_int_series(self, box_with_array, names): # GH#19042 test for correct name attachment box = box_with_array exname = get_expected_name(box, names) tdi = TimedeltaIndex( ["0days", "1day", "2days", "3days", "4days"], name=names[0] ) # TODO: Should we be parametrizing over types for `ser` too? ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1]) expected = Series( ["0days", "1day", "4days", "9days", "16days"], dtype="timedelta64[ns]", name=exname, ) tdi = tm.box_expected(tdi, box) xbox = get_upcast_box(tdi, ser) expected = tm.box_expected(expected, xbox) result = ser * tdi tm.assert_equal(result, expected) result = tdi * ser tm.assert_equal(result, expected) # TODO: Should we be parametrizing over types for `ser` too? 
def test_float_series_rdiv_td64arr(self, box_with_array, names): # GH#19042 test for correct name attachment box = box_with_array tdi = TimedeltaIndex( ["0days", "1day", "2days", "3days", "4days"], name=names[0] ) ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1]) xname = names[2] if box not in [tm.to_array, pd.array] else names[1] expected = Series( [tdi[n] / ser[n] for n in range(len(ser))], dtype="timedelta64[ns]", name=xname, ) tdi = tm.box_expected(tdi, box) xbox = get_upcast_box(tdi, ser) expected = tm.box_expected(expected, xbox) result = ser.__rtruediv__(tdi) if box is DataFrame: assert result is NotImplemented else: tm.assert_equal(result, expected) def test_td64arr_all_nat_div_object_dtype_numeric(self, box_with_array): # GH#39750 make sure we infer the result as td64 tdi = TimedeltaIndex([NaT, NaT]) left = tm.box_expected(tdi, box_with_array) right = np.array([2, 2.0], dtype=object) tdnat = np.timedelta64("NaT", "ns") expected = Index([tdnat] * 2, dtype=object) if box_with_array is not Index: expected = tm.box_expected(expected, box_with_array).astype(object) if box_with_array in [Series, DataFrame]: expected = expected.fillna(tdnat) # GH#18463 result = left / right tm.assert_equal(result, expected) result = left // right tm.assert_equal(result, expected)
TestTimedeltaArraylikeMulDivOps
python
etianen__django-reversion
tests/test_app/tests/test_commands.py
{ "start": 6417, "end": 7162 }
class ____(TestModelMixin, TestBase): def testDeleteRevisionsDays(self): date_created = timezone.now() - timedelta(days=20) with reversion.create_revision(): TestModel.objects.create() reversion.set_date_created(date_created) self.callCommand("deleterevisions", days=19) self.assertNoRevision() def testDeleteRevisionsDaysNoMatch(self): date_created = timezone.now() - timedelta(days=20) with reversion.create_revision(): obj = TestModel.objects.create() reversion.set_date_created(date_created) self.callCommand("deleterevisions", days=21) self.assertSingleRevision((obj,), date_created=date_created)
DeleteRevisionsDaysTest
python
keras-team__keras
keras/src/metrics/accuracy_metrics_test.py
{ "start": 15008, "end": 17248 }
class ____(testing.TestCase): def test_config(self): top_k_cat_acc_obj = accuracy_metrics.TopKCategoricalAccuracy( k=1, name="top_k_categorical_accuracy", dtype="float32" ) self.assertEqual(top_k_cat_acc_obj.name, "top_k_categorical_accuracy") self.assertEqual(len(top_k_cat_acc_obj.variables), 2) self.assertEqual(top_k_cat_acc_obj._dtype, "float32") # Test get_config top_k_cat_acc_obj_config = top_k_cat_acc_obj.get_config() self.assertEqual( top_k_cat_acc_obj_config["name"], "top_k_categorical_accuracy" ) self.assertEqual(top_k_cat_acc_obj_config["dtype"], "float32") self.assertEqual(top_k_cat_acc_obj_config["k"], 1) # Check save and restore config top_k_cat_acc_obj2 = ( accuracy_metrics.TopKCategoricalAccuracy.from_config( top_k_cat_acc_obj_config ) ) self.assertEqual(top_k_cat_acc_obj2.name, "top_k_categorical_accuracy") self.assertEqual(len(top_k_cat_acc_obj2.variables), 2) self.assertEqual(top_k_cat_acc_obj2._dtype, "float32") self.assertEqual(top_k_cat_acc_obj2.k, 1) def test_unweighted(self): top_k_cat_acc_obj = accuracy_metrics.TopKCategoricalAccuracy( k=1, name="top_k_categorical_accuracy", dtype="float32" ) y_true = np.array([[0, 0, 1], [0, 1, 0]]) y_pred = np.array([[0.1, 0.9, 0.8], [0.05, 0.95, 0]], dtype="float32") top_k_cat_acc_obj.update_state(y_true, y_pred) result = top_k_cat_acc_obj.result() self.assertAllClose(result, 0.5, atol=1e-3) def test_weighted(self): top_k_cat_acc_obj = accuracy_metrics.TopKCategoricalAccuracy( k=1, name="top_k_categorical_accuracy", dtype="float32" ) y_true = np.array([[0, 0, 1], [0, 1, 0]]) y_pred = np.array([[0.1, 0.9, 0.8], [0.05, 0.95, 0]], dtype="float32") sample_weight = np.array([0.7, 0.3]) top_k_cat_acc_obj.update_state( y_true, y_pred, sample_weight=sample_weight ) result = top_k_cat_acc_obj.result() self.assertAllClose(result, 0.3, atol=1e-3)
TopKCategoricalAccuracyTest
python
great-expectations__great_expectations
great_expectations/core/partitioners.py
{ "start": 324, "end": 519 }
class ____(pydantic.BaseModel): column_name: str sort_ascending: bool = True method_name: Literal["partition_on_year_and_month"] = "partition_on_year_and_month"
ColumnPartitionerMonthly
python
ray-project__ray
python/ray/util/tracing/tracing_helper.py
{ "start": 3772, "end": 5024 }
class ____(Exception): pass def _import_from_string(import_str: Union[ModuleType, str]) -> ModuleType: """Given a string that is in format "<module>:<attribute>", import the attribute.""" if not isinstance(import_str, str): return import_str module_str, _, attrs_str = import_str.partition(":") if not module_str or not attrs_str: message = ( 'Import string "{import_str}" must be in format' '"<module>:<attribute>".' ) raise _ImportFromStringError(message.format(import_str=import_str)) try: module = importlib.import_module(module_str) except ImportError as exc: if exc.name != module_str: raise exc from None message = 'Could not import module "{module_str}".' raise _ImportFromStringError(message.format(module_str=module_str)) instance = module try: for attr_str in attrs_str.split("."): instance = getattr(instance, attr_str) except AttributeError: message = 'Attribute "{attrs_str}" not found in module "{module_str}".' raise _ImportFromStringError( message.format(attrs_str=attrs_str, module_str=module_str) ) return instance
_ImportFromStringError
python
pyodide__pyodide
src/py/_pyodide/_core_docs.py
{ "start": 35239, "end": 36784 }
class ____(JsIterable[KT], Generic[KT, VT_co], Mapping[KT, VT_co], metaclass=_ABCMeta): """A JavaScript Map To be considered a map, a JavaScript object must have a ``get`` method, it must have a ``size`` or a ``length`` property which is a number (idiomatically it should be called ``size``) and it must be iterable. """ _js_type_flags = [ "HAS_GET | HAS_LENGTH | IS_ITERABLE", "IS_OBJECT_MAP", "IS_PY_JSON_DICT", ] def __getitem__(self, idx: KT) -> VT_co: raise NotImplementedError def __len__(self) -> int: return 0 def __contains__(self, idx: object) -> bool: raise NotImplementedError def keys(self) -> KeysView[KT]: """Return a :py:class:`~collections.abc.KeysView` for the map.""" raise NotImplementedError def items(self) -> ItemsView[KT, VT_co]: """Return a :py:class:`~collections.abc.ItemsView` for the map.""" raise NotImplementedError def values(self) -> ValuesView[VT_co]: """Return a :py:class:`~collections.abc.ValuesView` for the map.""" raise NotImplementedError @overload def get(self, key: KT, /) -> VT_co | None: ... @overload def get(self, key: KT, default: VT_co | T, /) -> VT_co | T: ... @docs_argspec("(self, key: KT, default: VT_co | None, /) -> VT_co") def get(self, key: KT, default: Any = None, /) -> VT_co: r"""If ``key in self``, returns ``self[key]``. Otherwise returns ``default``.""" raise NotImplementedError
JsMap
python
bokeh__bokeh
src/bokeh/sphinxext/_internal/bokeh_prop.py
{ "start": 2216, "end": 4494 }
class ____(BokehDirective): has_content = True required_arguments = 1 optional_arguments = 2 option_spec = {"module": unchanged, "type": unchanged} def run(self): full_name = self.arguments[0] model_name, prop_name = full_name.rsplit(".") module_name = self.options["module"] try: module = importlib.import_module(module_name) except ImportError: raise SphinxError(f"Could not generate reference docs for {full_name}: could not import module {module_name}") model = getattr(module, model_name, None) if model is None: raise SphinxError(f"Unable to generate reference docs for {full_name}: no model {model_name} in module {module_name}") # We may need to instantiate deprecated objects as part of documenting # them in the reference guide. Suppress any warnings here to keep the # docs build clean just for this case with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=BokehDeprecationWarning) model_obj = model() try: descriptor = model_obj.lookup(prop_name) except AttributeError: raise SphinxError(f"Unable to generate reference docs for {full_name}: no property {prop_name} on model {model_name}") rst_text = PROP_DETAIL.render( name=prop_name, module=self.options["module"], default=repr(descriptor.instance_default(model_obj)), type_info=type_link(descriptor.property), doc="" if descriptor.__doc__ is None else textwrap.dedent(descriptor.__doc__), ) return self.parse(rst_text, f"<bokeh-prop: {model_name}.{prop_name}>") def setup(app): """ Required Sphinx extension setup function. """ app.add_directive_to_domain("py", "bokeh-prop", BokehPropDirective) return PARALLEL_SAFE # ----------------------------------------------------------------------------- # Private API # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Code # -----------------------------------------------------------------------------
BokehPropDirective
python
mwaskom__seaborn
tests/_core/test_properties.py
{ "start": 16751, "end": 19112 }
class ____(DataFixtures):

    def norm(self, x):
        # Min-max normalize onto [0, 1].
        lo, hi = x.min(), x.max()
        return (x - lo) / (hi - lo)

    @pytest.mark.parametrize("data_type,scale_class", [
        ("cat", Nominal),
        ("num", Continuous),
        ("bool", Boolean),
    ])
    def test_default(self, data_type, scale_class, vectors):
        # Each data type should map onto its natural default scale.
        vector = vectors[data_type]
        default = self.prop().default_scale(vector)
        assert isinstance(default, scale_class)

    @pytest.mark.parametrize("arg,data_type,scale_class", [
        ((1, 3), "cat", Nominal),
        ((1, 3), "num", Continuous),
        ((1, 3), "bool", Boolean),
        ([1, 2, 3], "cat", Nominal),
        ([1, 2, 3], "num", Nominal),
        ([1, 3], "bool", Boolean),
        ({"a": 1, "b": 3, "c": 2}, "cat", Nominal),
        ({2: 1, 4: 3, 8: 2}, "num", Nominal),
        ({True: 4, False: 2}, "bool", Boolean),
    ])
    def test_inference(self, arg, data_type, scale_class, vectors):
        # The inferred scale class depends on both the values argument
        # and the data type; the values must be carried on the scale.
        vector = vectors[data_type]
        inferred = self.prop().infer_scale(arg, vector)
        assert isinstance(inferred, scale_class)
        assert inferred.values == arg

    def test_mapped_interval_numeric(self, num_vector):
        # Endpoints of the normalized domain hit the default range.
        prop = self.prop()
        mapping = prop.get_mapping(Continuous(), num_vector)
        assert_array_equal(mapping([0, 1]), prop.default_range)

    def test_mapped_interval_categorical(self, cat_vector):
        # Categorical levels span the default range in reverse order.
        prop = self.prop()
        mapping = prop.get_mapping(Nominal(), cat_vector)
        n = cat_vector.nunique()
        assert_array_equal(mapping([n - 1, 0]), prop.default_range)

    def test_bad_scale_values_numeric_data(self, num_vector):
        # Continuous scales only accept a 2-tuple for values.
        name = self.prop.__name__.lower()
        err_stem = (
            f"Values for {name} variables with Continuous scale must be 2-tuple"
        )

        with pytest.raises(TypeError, match=f"{err_stem}; not <class 'str'>."):
            self.prop().get_mapping(Continuous("abc"), num_vector)

        with pytest.raises(TypeError, match=f"{err_stem}; not 3-tuple."):
            self.prop().get_mapping(Continuous((1, 2, 3)), num_vector)

    def test_bad_scale_values_categorical_data(self, cat_vector):
        # Nominal scales reject scalar string values.
        name = self.prop.__name__.lower()
        err_text = f"Values for {name} variables with Nominal scale"
        with pytest.raises(TypeError, match=err_text):
            self.prop().get_mapping(Nominal("abc"), cat_vector)
IntervalBase
python
PrefectHQ__prefect
src/integrations/prefect-redis/prefect_redis/messaging.py
{ "start": 6047, "end": 6391 }
class ____:
    """
    A subscription-like object for Redis.

    We mimic the memory subscription interface so that we can set
    max_retries and handle dead letter queue storage in Redis.
    """

    def __init__(self, max_retries: int = 3, dlq_key: str = "dlq"):
        # How many delivery attempts a message gets before dead-lettering.
        self.max_retries = max_retries
        # Redis key under which dead-lettered messages are stored.
        self.dlq_key = dlq_key
Subscription