language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
google__jax
jax/_src/custom_derivatives.py
{ "start": 45149, "end": 54515 }
class ____(core.Primitive): multiple_results = True def bind(self, *args, **params): return self._true_bind(*args, **params) def bind_with_trace(self, trace, args, params): fun, fwd, bwd, tracers = args[0], args[1], args[2], args[3:] return trace.process_custom_vjp_call(self, fun, fwd, bwd, tracers, **params) def impl(self, fun, fwd, bwd, *args): raise NotImplementedError def get_bind_params(self, params): new_params = dict(params) call_jaxpr: core.ClosedJaxpr = new_params.pop('call_jaxpr') num_consts: int = new_params.pop('num_consts') fwd_jaxpr_thunk = new_params.pop('fwd_jaxpr_thunk') fun = lu.wrap_init(core.jaxpr_as_fun(call_jaxpr), debug_info=call_jaxpr.jaxpr.debug_info) fwd = lift_fwd(num_consts, fwd_jaxpr_thunk) const_avals, _ = split_list(call_jaxpr.in_avals, [num_consts]) bwd = _handle_consts_in_bwd(new_params.pop('bwd'), const_avals) return [fun, fwd, bwd], new_params def lift_fwd(num_consts: int, fwd_jaxpr_thunk: lu.WrappedFun) -> lu.WrappedFun: def fwd(*args): vals, nonzeros = args[::2], args[1::2] assert len(vals) == len(nonzeros) _, primals = split_list(vals, [num_consts]) const_nonzeros, in_nonzeros = split_list(nonzeros, [num_consts]) if any(const_nonzeros): raise ad.CustomVJPException() fwd_jaxpr, fwd_consts = fwd_jaxpr_thunk.call_wrapped(*in_nonzeros) return core.eval_jaxpr(fwd_jaxpr, fwd_consts, *primals) return lu.wrap_init(fwd, debug_info=fwd_jaxpr_thunk.debug_info) @lu.transformation2 def _handle_consts_in_bwd(f, const_avals, *args): return [Zero(a) for a in const_avals] + list(f(*args)) custom_vjp_call_p = CustomVJPCallPrimitive('custom_vjp_call') # TODO(phawkins,mattjj): make this primitive cacheable. 
mlir.register_lowering(custom_vjp_call_p, _custom_jvp_vjp_call_lowering, cacheable=False) def _custom_vjp_call_typecheck(_, *in_avals, call_jaxpr, **kwargs): del in_avals, kwargs disallowed_effects = effects.custom_derivatives_allowed_effects.filter_not_in( call_jaxpr.effects) if disallowed_effects: raise NotImplementedError( f'Effects not supported in `custom_vjp`: {disallowed_effects}') return call_jaxpr.out_avals, call_jaxpr.effects core.custom_typechecks[custom_vjp_call_p] = _custom_vjp_call_typecheck def _custom_vjp_call_dce( used_outs: Sequence[bool], eqn: core.JaxprEqn ) -> tuple[list[bool], core.JaxprEqn | None]: if not any(used_outs) and not pe.has_effects(eqn): return [False] * len(eqn.invars), None call_jaxpr: core.ClosedJaxpr = eqn.params["call_jaxpr"] fwd_jaxpr_thunk = eqn.params["fwd_jaxpr_thunk"] bwd: lu.WrappedFun = eqn.params["bwd"] out_trees: Callable[[], tuple[PyTreeDef, PyTreeDef, list[int | None]]] = eqn.params["out_trees"] symbolic_zeros: bool = eqn.params["symbolic_zeros"] dce_call_jaxpr: core.ClosedJaxpr used_ins: Sequence[bool] dce_call_jaxpr, used_ins = _cached_closed_call_dce_instantiate( call_jaxpr, tuple(used_outs)) assert all(used_ins) @partial(lu.wrap_init, debug_info=fwd_jaxpr_thunk.debug_info) @pe._memoize def dce_fwd_jaxpr_thunk(*zeros): fwd_jaxpr = core.ClosedJaxpr(*fwd_jaxpr_thunk.call_wrapped(*zeros)) _, res_tree, fwds = out_trees() num_res_out = res_tree.num_leaves - sum(f is not None for f in fwds) dce_fwd_jaxpr, _ = _cached_closed_call_dce_instantiate( fwd_jaxpr, (True,) * num_res_out + tuple(used_outs)) return dce_fwd_jaxpr.jaxpr, dce_fwd_jaxpr.consts def dce_bwd(*args): _, res_tree, _ = out_trees() res, cts = split_list(args, [res_tree.num_leaves]) cts_ = iter(cts) all_cts = [] for used, aval in zip(used_outs, call_jaxpr.out_avals): if used: all_cts.append(next(cts_)) else: ct_aval = aval.to_tangent_aval() if symbolic_zeros: all_cts.append(SymbolicZero(ct_aval)) else: all_cts.append(zeros_like_aval(ct_aval)) assert 
next(cts_, None) is None return bwd.call_wrapped(*res, *all_cts) dce_bwd_wrapped = lu.wrap_init(dce_bwd, debug_info=bwd.debug_info) outvars = [v for used, v in zip(used_outs, eqn.outvars) if used] new_params = dict( eqn.params, call_jaxpr=dce_call_jaxpr, fwd_jaxpr_thunk=dce_fwd_jaxpr_thunk, bwd=dce_bwd_wrapped, ) new_eqn = pe.new_jaxpr_eqn( eqn.invars, outvars, eqn.primitive, new_params, dce_call_jaxpr.effects, eqn.source_info, eqn.ctx) return list(used_ins), new_eqn pe.dce_rules[custom_vjp_call_p] = _custom_vjp_call_dce def _custom_vjp_call_pp_rule(eqn: core.JaxprEqn, context: core.JaxprPpContext, settings: core.JaxprPpSettings) -> core.pp.Doc: params = dict(eqn.params) if not params["num_consts"]: params.pop("num_consts") params.pop("out_trees") params["fwd"] = params.pop("fwd_jaxpr_thunk").debug_info.func_name params["bwd"] = params.pop("bwd").debug_info.func_name names = sorted(params) params["name"] = params["call_jaxpr"].jaxpr.debug_info.func_name return core._pp_eqn(eqn.replace(params=params), context, settings, params=["name"] + names) core.pp_eqn_rules[custom_vjp_call_p] = _custom_vjp_call_pp_rule batching.primitive_batchers[ad.custom_lin_p] = ad.raise_custom_vjp_error_on_jvp # TODO(phawkins,mattjj): make this primitive cacheable. mlir.register_lowering(ad.custom_lin_p, ad.raise_custom_vjp_error_on_jvp, cacheable=False) def custom_gradient(fun): """Convenience function for defining custom VJP rules (aka custom gradients). While the canonical way to define custom VJP rules is via ``jax.custom_vjp``, the ``custom_gradient`` convenience wrapper follows TensorFlow's ``tf.custom_gradient`` API. The difference here is that ``custom_gradient`` can be used as a decorator on one function that returns both the primal value (representing the output of the mathematical function to be differentiated) and the VJP (gradient) function. See https://www.tensorflow.org/api_docs/python/tf/custom_gradient. 
If the mathematical function to be differentiated has Haskell-like signature ``a -> b``, then the Python callable ``fun`` should have the signature ``a -> (b, CT b --o CT a)`` where we use ``CT x`` to denote a cotangent type for ``x`` and the ``--o`` arrow to denote a linear function. See the example below. That is, ``fun`` should return a pair where the first element represents the value of the mathematical function to be differentiated and the second element is a function to be called on the backward pass of reverse-mode automatic differentiation (i.e. the "custom gradient" function). The function returned as the second element of the output of ``fun`` can close over intermediate values computed when evaluating the function to be differentiated. That is, use lexical closure to share work between the forward pass and the backward pass of reverse-mode automatic differentiation. However, it cannot perform Python control flow which depends on the values of the closed-over intermediate values or its cotangent arguments; if the function includes such control flow, an error is raised. Args: fun: a Python callable specifying both the mathematical function to be differentiated and its reverse-mode differentiation rule. It should return a pair consisting of an output value and a Python callable that represents the custom gradient function. Returns: A Python callable that accepts the same arguments as ``fun`` and returns the output value specified by the first element of ``fun``'s output pair. For example: >>> @jax.custom_gradient ... def f(x): ... return x ** 2, lambda g: (g * x,) ... >>> print(f(3.)) 9.0 >>> print(jax.grad(f)(3.)) 3.0 An example with a function on two arguments, so that the VJP function must return a tuple of length two: >>> @jax.custom_gradient ... def f(x, y): ... return x * y, lambda g: (g * y, g * x) ... 
>>> print(f(3., 4.)) 12.0 >>> print(jax.grad(f, argnums=(0, 1))(3., 4.)) (Array(4., dtype=float32, weak_type=True), Array(3., dtype=float32, weak_type=True)) """ @custom_vjp def wrapped_fun(*args, **kwargs): ans, _ = fun(*args, **kwargs) return ans def fwd(*args, **kwargs): ans, rule = fun(*args, **kwargs) ans_flat, out_tree = tree_flatten((ans,)) debug_fwd = debug_info("custom_gradient fwd", rule, (ans,), {}) rule, in_tree = flatten_fun_nokwargs(lu.wrap_init(rule, debug_info=debug_fwd), out_tree) ans_avals = [core.get_aval(x).to_tangent_aval() for x in ans_flat] jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(rule, ans_avals) return ans, Residuals(jaxpr, in_tree(), out_tree, consts) def bwd(res, cts): jaxpr, in_tree, out_tree, consts = res cts_flat, out_tree_ = tree_flatten((cts,)) if out_tree != out_tree_: raise TypeError(f'{out_tree}\n!=\n{out_tree_}') cts_out = core.eval_jaxpr(jaxpr, consts, *cts_flat) cts_out = tree_unflatten(in_tree, cts_out) if treedef_is_leaf(in_tree): cts_out = (cts_out,) return cts_out wrapped_fun.defvjp(fwd, bwd) return wrapped_fun @register_pytree_node_class
CustomVJPCallPrimitive
python
fabric__fabric
fabric/testing/base.py
{ "start": 980, "end": 2370 }
class ____: """ Data record specifying params of a command execution to mock/expect. :param str cmd: Command string to expect. If not given, no expectations about the command executed will be set up. Default: ``None``. :param bytes out: Data yielded as remote stdout. Default: ``b""``. :param bytes err: Data yielded as remote stderr. Default: ``b""``. :param int exit: Remote exit code. Default: ``0``. :param int waits: Number of calls to the channel's ``exit_status_ready`` that should return ``False`` before it then returns ``True``. Default: ``0`` (``exit_status_ready`` will return ``True`` immediately). .. versionadded:: 2.1 """ def __init__(self, cmd=None, out=b"", err=b"", in_=None, exit=0, waits=0): self.cmd = cmd self.out = out self.err = err self.in_ = in_ self.exit = exit self.waits = waits def __repr__(self): # TODO: just leverage attrs, maybe vendored into Invoke so we don't # grow more dependencies? Ehhh return "<{} cmd={!r}>".format(self.__class__.__name__, self.cmd) def expect_execution(self, channel): """ Assert that the ``channel`` was used to run this command. .. versionadded:: 2.7 """ channel.exec_command.assert_called_with(self.cmd or ANY)
Command
python
mlflow__mlflow
mlflow/entities/logged_model_tag.py
{ "start": 104, "end": 850 }
class ____(_MlflowObject): """Tag object associated with a Model.""" def __init__(self, key, value): self._key = key self._value = value def __eq__(self, other): if type(other) is type(self): # TODO deep equality here? return self.__dict__ == other.__dict__ return False @property def key(self): """String name of the tag.""" return self._key @property def value(self): """String value of the tag.""" return self._value def to_proto(self): return pb2.LoggedModelTag(key=self._key, value=self._value) @classmethod def from_proto(cls, proto): return cls(key=proto.key, value=proto.value)
LoggedModelTag
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 663206, "end": 663645 }
class ____(sgqlc.types.Type): """A funding platform link for a repository.""" __schema__ = github_schema __field_names__ = ("platform", "url") platform = sgqlc.types.Field(sgqlc.types.non_null(FundingPlatform), graphql_name="platform") """The funding platform this link is for.""" url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url") """The configured URL for this funding link."""
FundingLink
python
run-llama__llama_index
llama-index-packs/llama-index-packs-self-discover/llama_index/packs/self_discover/base.py
{ "start": 6436, "end": 6567 }
class ____(Event): """Event to create reasoning structure.""" task: str reasoning_structure: str
ReasoningStructureEvent
python
Pylons__pyramid
src/pyramid/path.py
{ "start": 2922, "end": 3851 }
class ____: def __init__(self, package=CALLER_PACKAGE): if package in (None, CALLER_PACKAGE): self.package = package else: if isinstance(package, str): try: __import__(package) except ImportError: raise ValueError( f'The dotted name {package!r} cannot be imported' ) package = sys.modules[package] self.package = package_of(package) def get_package_name(self): if self.package is CALLER_PACKAGE: package_name = caller_package().__name__ else: package_name = self.package.__name__ return package_name def get_package(self): if self.package is CALLER_PACKAGE: package = caller_package() else: package = self.package return package
Resolver
python
django__django
tests/messages_tests/test_api.py
{ "start": 1752, "end": 2016 }
class ____(ApiTests): """ add_message() should use ducktyping to allow request wrappers such as the one in Django REST framework. """ def setUp(self): super().setUp() self.request = CustomRequest(self.request)
CustomRequestApiTests
python
dateutil__dateutil
tests/test_parser.py
{ "start": 15165, "end": 17234 }
class ____(object): def assert_equal_same_tz(self, dt1, dt2): assert dt1 == dt2 assert dt1.tzinfo is dt2.tzinfo def test_tzinfo_dict_could_return_none(self): dstr = "2017-02-03 12:40 BRST" result = parse(dstr, tzinfos={"BRST": None}) expected = datetime(2017, 2, 3, 12, 40) self.assert_equal_same_tz(result, expected) def test_tzinfos_callable_could_return_none(self): dstr = "2017-02-03 12:40 BRST" result = parse(dstr, tzinfos=lambda *args: None) expected = datetime(2017, 2, 3, 12, 40) self.assert_equal_same_tz(result, expected) def test_invalid_tzinfo_input(self): dstr = "2014 January 19 09:00 UTC" # Pass an absurd tzinfos object tzinfos = {"UTC": ValueError} with pytest.raises(TypeError): parse(dstr, tzinfos=tzinfos) def test_valid_tzinfo_tzinfo_input(self): dstr = "2014 January 19 09:00 UTC" tzinfos = {"UTC": tz.UTC} expected = datetime(2014, 1, 19, 9, tzinfo=tz.UTC) res = parse(dstr, tzinfos=tzinfos) self.assert_equal_same_tz(res, expected) def test_valid_tzinfo_unicode_input(self): dstr = "2014 January 19 09:00 UTC" tzinfos = {u"UTC": u"UTC+0"} expected = datetime(2014, 1, 19, 9, tzinfo=tz.tzstr("UTC+0")) res = parse(dstr, tzinfos=tzinfos) self.assert_equal_same_tz(res, expected) def test_valid_tzinfo_callable_input(self): dstr = "2014 January 19 09:00 UTC" def tzinfos(*args, **kwargs): return u"UTC+0" expected = datetime(2014, 1, 19, 9, tzinfo=tz.tzstr("UTC+0")) res = parse(dstr, tzinfos=tzinfos) self.assert_equal_same_tz(res, expected) def test_valid_tzinfo_int_input(self): dstr = "2014 January 19 09:00 UTC" tzinfos = {u"UTC": -28800} expected = datetime(2014, 1, 19, 9, tzinfo=tz.tzoffset(u"UTC", -28800)) res = parse(dstr, tzinfos=tzinfos) self.assert_equal_same_tz(res, expected)
TestTzinfoInputTypes
python
tensorflow__tensorflow
tensorflow/python/compiler/tensorrt/model_tests/model_handler.py
{ "start": 9854, "end": 12622 }
class ____(_ModelHandlerBase): """Runs a model in TF1.""" @property def meta_graph(self) -> meta_graph_pb2.MetaGraphDef: return load_meta_graph( saved_model_dir=self.model_config.saved_model_dir, saved_model_tags=self.model_config.saved_model_tags, saved_model_signature_key=self.model_config.saved_model_signature_key) @property def input_tensor_info(self) -> Mapping[str, meta_graph_pb2.TensorInfo]: return self.meta_graph.signature_def[ self.model_config.saved_model_signature_key].inputs @property def output_tensor_info(self) -> Mapping[str, meta_graph_pb2.TensorInfo]: return self.meta_graph.signature_def[ self.model_config.saved_model_signature_key].outputs @property def input_tensort_names(self) -> Sequence[str]: return [info.name for info in self.input_tensor_info.values()] @property def output_tensor_names(self) -> Sequence[str]: return [info.name for info in self.output_tensor_info.values()] def generate_random_inputs(self, batch_size: Optional[int] = None ) -> Mapping[str, np.ndarray]: batch_size = batch_size or self.model_config.default_batch_size return { tensor_info.name: _generate_random_tensor_v1(tensor_info, batch_size) for tensor_info in self.input_tensor_info.values() } def run(self, inputs: Optional[Mapping[str, np.ndarray]] = None, warmup_iterations=10, benchmark_iterations=100, enable_gpu=True) -> TestResult: inputs = inputs or self.generate_random_inputs() config_proto = None if not enable_gpu: config_proto = config_pb2.ConfigProto(device_count={"CPU": 1, "GPU": 0}) logging.info("Running model inference!") with framework_ops.Graph().as_default(): with session.Session(config=config_proto) as sess: importer.import_graph_def(self.meta_graph.graph_def, name="") try: output_tensor_names = self.output_tensor_names for _ in range(warmup_iterations): sess.run(fetches=output_tensor_names, feed_dict=inputs) latency = [] for _ in range(benchmark_iterations): before = time.time() outputs = sess.run(fetches=output_tensor_names, feed_dict=inputs) 
latency.append(time.time() - before) except Exception as exc: raise RuntimeError("Failed to run model inference! " "Model information: {}".format(str(self))) from exc return TestResult( model_config=self.model_config, enable_gpu=enable_gpu, model_latency=latency, output_names=self.output_tensor_names, output_tensors=outputs)
ModelHandlerV1
python
jazzband__django-model-utils
model_utils/managers.py
{ "start": 12399, "end": 12503 }
class ____(QueryManagerMixin[ModelT], models.Manager[ModelT]): # type: ignore[misc] pass
QueryManager
python
spack__spack
lib/spack/spack/new_installer.py
{ "start": 8498, "end": 17415 }
class ____: """Manages the installation prefix during overwrite installations.""" def __init__(self, prefix: str, overwrite: bool, keep_prefix: bool = False) -> None: """Initialize the prefix pivoter. Args: prefix: The installation prefix path overwrite: Whether to allow overwriting an existing prefix keep_prefix: Whether to keep a failed installation prefix (when not overwriting) """ self.prefix = prefix #: Whether to allow installation when the prefix exists self.overwrite = overwrite #: Whether to keep a failed installation prefix self.keep_prefix = keep_prefix #: Temporary location for the original prefix during overwrite self.tmp_prefix: Optional[str] = None self.parent = os.path.dirname(prefix) def __enter__(self) -> "PrefixPivoter": """Enter the context: move existing prefix to temporary location if needed.""" if not self._lexists(self.prefix): return self if not self.overwrite: raise spack.error.InstallError(f"Install prefix {self.prefix} already exists") # Move the existing prefix to a temporary location self.tmp_prefix = self._mkdtemp( dir=self.parent, prefix=".", suffix=OVERWRITE_BACKUP_SUFFIX ) self._rename(self.prefix, self.tmp_prefix) return self def __exit__( self, exc_type: Optional[type], exc_val: Optional[BaseException], exc_tb: Optional[object] ) -> None: """Exit the context: cleanup on success, restore on failure.""" if exc_type is None: # Success: remove the backup in case of overwrite if self.tmp_prefix is not None: self._rmtree_ignore_errors(self.tmp_prefix) return # Failure handling: # Priority 1: If we're overwriting, always restore the original prefix # Priority 2: If keep_prefix is False, remove the failed installation if self.overwrite and self.tmp_prefix is not None: # Overwrite case: restore the original prefix if it existed # The highest priority is to restore the original prefix, so we try to: # rename prefix -> garbage: move failed dir out of the way # rename tmp_prefix -> prefix: restore original prefix # remove garbage (this is 
allowed to fail) garbage = self._mkdtemp(dir=self.parent, prefix=".", suffix=OVERWRITE_GARBAGE_SUFFIX) try: self._rename(self.prefix, garbage) has_failed_prefix = True except FileNotFoundError: # prefix dir does not exist, so we don't have to delete it. has_failed_prefix = False self._rename(self.tmp_prefix, self.prefix) if has_failed_prefix: self._rmtree_ignore_errors(garbage) elif not self.keep_prefix and self._lexists(self.prefix): # Not overwriting, keep_prefix is False: remove the failed installation garbage = self._mkdtemp(dir=self.parent, prefix=".", suffix=OVERWRITE_GARBAGE_SUFFIX) self._rename(self.prefix, garbage) self._rmtree_ignore_errors(garbage) # else: keep_prefix is True, leave the failed prefix in place def _lexists(self, path: str) -> bool: return os.path.lexists(path) def _rename(self, src: str, dst: str) -> None: os.rename(src, dst) def _mkdtemp(self, dir: str, prefix: str, suffix: str) -> str: return tempfile.mkdtemp(dir=dir, prefix=prefix, suffix=suffix) def _rmtree_ignore_errors(self, path: str) -> None: shutil.rmtree(path, ignore_errors=True) def worker_function( spec: spack.spec.Spec, explicit: bool, mirrors: List[spack.url_buildcache.MirrorURLAndVersion], unsigned: Optional[bool], install_policy: InstallPolicy, dirty: bool, keep_stage: bool, restage: bool, overwrite: bool, keep_prefix: bool, skip_patch: bool, state: Connection, parent: Connection, echo_control: Connection, makeflags: str, js1: Optional[Connection], js2: Optional[Connection], store: spack.store.Store, config: spack.config.Configuration, ): """ Function run in the build child process. Installs the specified spec, sending state updates and build output back to the parent process. 
Args: spec: Spec to install explicit: Whether the spec was explicitly requested by the user mirrors: List of buildcache mirrors to try unsigned: Whether to allow unsigned buildcache entries install_policy: ``"auto"``, ``"cache_only"``, or ``"source_only"`` dirty: Whether to preserve user environment in the build environment keep_stage: Whether to keep the build stage after installation restage: Whether to restage the source before building overwrite: Whether to overwrite the existing install prefix keep_prefix: Whether to keep a failed installation prefix skip_patch: Whether to skip the patch phase state: Connection to send state updates to parent: Connection to send build output to echo_control: Connection to receive echo control messages from makeflags: MAKEFLAGS to set, so that the build process uses the POSIX jobserver js1: Connection for old style jobserver read fd (if any). Unused, just to inherit fd. js2: Connection for old style jobserver write fd (if any). Unused, just to inherit fd. store: global store instance from parent config: global config instance from parent """ # TODO: don't start a build for external packages if spec.external: return tee = Tee(echo_control, parent) os.environ["MAKEFLAGS"] = makeflags spack.store.STORE = store spack.config.CONFIG = config spack.paths.set_working_dir() # Use closedfd=false because of the connection objects. Use line buffering. 
state_stream = os.fdopen(state.fileno(), "w", buffering=1, closefd=False) exit_code = 0 try: with PrefixPivoter(spec.prefix, overwrite, keep_prefix): _install( spec, explicit, mirrors, unsigned, install_policy, dirty, keep_stage, restage, skip_patch, state_stream, tee, store, ) except Exception: traceback.print_exc() # log the traceback to the log file exit_code = 1 finally: tee.close() state_stream.close() sys.exit(exit_code) def _install( spec: spack.spec.Spec, explicit: bool, mirrors: List[spack.url_buildcache.MirrorURLAndVersion], unsigned: Optional[bool], install_policy: InstallPolicy, dirty: bool, keep_stage: bool, restage: bool, skip_patch: bool, state_stream: io.TextIOWrapper, tee: Tee, store: spack.store.Store = spack.store.STORE, ) -> None: """Install a spec from build cache or source.""" # Create the stage and log file before starting the tee thread. pkg = spec.package spack.build_environment.setup_package(pkg, dirty=dirty) # Try to install from buildcache, unless user asked for source only if install_policy != "source_only": if mirrors and install_from_buildcache(mirrors, spec, unsigned, state_stream): spack.hooks.post_install(spec, explicit) return elif install_policy == "cache_only": # Binary required but not available send_state("no binary available", state_stream) raise spack.error.InstallError(f"No binary available for {spec}") store.layout.create_install_directory(spec) stage = pkg.stage stage.keep = keep_stage # Then try a source build. with stage: if restage: stage.destroy() stage.create() # Start collecting logs. 
tee.set_output_file(pkg.log_path) send_state("staging", state_stream) if not skip_patch: pkg.do_patch() else: pkg.do_stage() os.chdir(stage.source_path) spack.hooks.pre_install(spec) for phase in spack.builder.create(pkg): send_state(phase.name, state_stream) phase.execute() # Install source build logs with open(pkg.log_path, "rb") as f, open(pkg.install_log_path, "wb") as g: # Use GzipFile directly so we can omit filename / mtime in header gzip_file = GzipFile(filename="", mode="wb", compresslevel=6, mtime=0, fileobj=g) shutil.copyfileobj(f, gzip_file) gzip_file.close() spack.hooks.post_install(spec, explicit)
PrefixPivoter
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_format25.py
{ "start": 315, "end": 1580 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_format25.xlsx") def test_create_file(self): """Test the creation of an XlsxWriter file with chart formatting.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "column"}) chart.axis_ids = [108178048, 108319488] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) worksheet.write_column("C1", data[2]) chart.add_series( { "categories": "=Sheet1!$A$1:$A$5", "values": "=Sheet1!$B$1:$B$5", "border": {"color": "red", "transparency": 50}, } ) chart.add_series( { "categories": "=Sheet1!$A$1:$A$5", "values": "=Sheet1!$C$1:$C$5", } ) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
sqlalchemy__sqlalchemy
test/orm/test_cascade.py
{ "start": 83778, "end": 88572 }
class ____(fixtures.MappedTest): """Pending entities that are orphans""" @classmethod def define_tables(cls, metadata): Table( "users", metadata, Column( "user_id", Integer, primary_key=True, test_needs_autoincrement=True, ), Column("name", String(40)), ) Table( "addresses", metadata, Column( "address_id", Integer, primary_key=True, test_needs_autoincrement=True, ), Column("user_id", Integer, ForeignKey("users.user_id")), Column("email_address", String(40)), ) Table( "orders", metadata, Column( "order_id", Integer, primary_key=True, test_needs_autoincrement=True, ), Column( "user_id", Integer, ForeignKey("users.user_id"), nullable=False ), ) @classmethod def setup_classes(cls): class User(cls.Comparable): pass class Address(cls.Comparable): pass class Order(cls.Comparable): pass def test_pending_standalone_orphan(self): """Standalone 'orphan' objects can now be persisted, if the underlying constraints of the database allow it. This now supports persisting of objects based on foreign key values alone. """ users, orders, User, Address, Order, addresses = ( self.tables.users, self.tables.orders, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses, ) self.mapper_registry.map_imperatively(Order, orders) self.mapper_registry.map_imperatively(Address, addresses) self.mapper_registry.map_imperatively( User, users, properties=dict( addresses=relationship( Address, cascade="all,delete-orphan", backref="user" ), orders=relationship(Order, cascade="all, delete-orphan"), ), ) s = fixture_session() # the standalone Address goes in, its foreign key # allows NULL a = Address() s.add(a) s.commit() # the standalone Order does not. 
o = Order() s.add(o) assert_raises(sa_exc.DBAPIError, s.commit) s.rollback() # can assign o.user_id by foreign key, # flush succeeds u = User() s.add(u) s.flush() o = Order(user_id=u.user_id) s.add(o) s.commit() assert o in s and o not in s.new def test_pending_collection_expunge(self): """Removing a pending item from a collection expunges it from the session.""" users, Address, addresses, User = ( self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User, ) self.mapper_registry.map_imperatively(Address, addresses) self.mapper_registry.map_imperatively( User, users, properties=dict( addresses=relationship( Address, cascade="all,delete-orphan", backref="user" ) ), ) s = fixture_session() u = User() s.add(u) s.flush() a = Address() u.addresses.append(a) assert a in s u.addresses.remove(a) assert a not in s s.delete(u) s.flush() assert a.address_id is None, "Error: address should not be persistent" def test_nonorphans_ok(self): users, Address, addresses, User = ( self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User, ) self.mapper_registry.map_imperatively(Address, addresses) self.mapper_registry.map_imperatively( User, users, properties=dict( addresses=relationship( Address, cascade="all,delete", backref="user" ) ), ) s = fixture_session() u = User(name="u1", addresses=[Address(email_address="ad1")]) s.add(u) a1 = u.addresses[0] u.addresses.remove(a1) assert a1 in s s.flush() s.expunge_all() eq_(s.query(Address).all(), [Address(email_address="ad1")])
PendingOrphanTestSingleLevel
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/operators/redshift_data.py
{ "start": 1552, "end": 10854 }
class ____(AwsBaseOperator[RedshiftDataHook]): """ Executes SQL Statements against an Amazon Redshift cluster using Redshift Data. ... see also:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:RedshiftDataOperator` :param database: the name of the database :param sql: the SQL statement or list of SQL statement to run :param cluster_identifier: unique identifier of a cluster :param db_user: the database username :param parameters: the parameters for the SQL statement :param secret_arn: the name or ARN of the secret that enables db access :param statement_name: the name of the SQL statement :param with_event: indicates whether to send an event to EventBridge :param wait_for_completion: indicates whether to wait for a result, if True wait, if False don't wait :param poll_interval: how often in seconds to check the query status :param return_sql_result: if True will return the result of an SQL statement, if False (default) will return statement ID :param workgroup_name: name of the Redshift Serverless workgroup. Mutually exclusive with `cluster_identifier`. Specify this parameter to query Redshift Serverless. More info https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-serverless.html :param session_id: the session identifier of the query :param session_keep_alive_seconds: duration in seconds to keep the session alive after the query finishes. The maximum time a session can keep alive is 24 hours :param aws_conn_id: The Airflow connection used for AWS credentials. If this is ``None`` or empty then the default boto3 behaviour is used. If running Airflow in a distributed manner and aws_conn_id is None or empty, then default boto3 configuration would be used (and must be maintained on each worker node). :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used. :param verify: Whether to verify SSL certificates. 
See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html :param botocore_config: Configuration dictionary (key-values) for botocore client. See: https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html """ aws_hook_class = RedshiftDataHook template_fields = aws_template_fields( "cluster_identifier", "database", "sql", "db_user", "parameters", "statement_name", "workgroup_name", "session_id", ) template_ext = (".sql",) template_fields_renderers = {"sql": "sql"} def __init__( self, sql: str | list, database: str | None = None, cluster_identifier: str | None = None, db_user: str | None = None, parameters: list | None = None, secret_arn: str | None = None, statement_name: str | None = None, with_event: bool = False, wait_for_completion: bool = True, poll_interval: int = 10, return_sql_result: bool = False, workgroup_name: str | None = None, deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), session_id: str | None = None, session_keep_alive_seconds: int | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.database = database self.sql = sql self.cluster_identifier = cluster_identifier self.workgroup_name = workgroup_name self.db_user = db_user self.parameters = parameters self.secret_arn = secret_arn self.statement_name = statement_name self.with_event = with_event self.wait_for_completion = wait_for_completion if poll_interval > 0: self.poll_interval = poll_interval else: self.log.warning( "Invalid poll_interval:", poll_interval, ) self.return_sql_result = return_sql_result self.deferrable = deferrable self.session_id = session_id self.session_keep_alive_seconds = session_keep_alive_seconds def execute(self, context: Context) -> list[GetStatementResultResponseTypeDef] | list[str]: """Execute a statement against Amazon Redshift.""" self.log.info("Executing statement: %s", self.sql) # Set wait_for_completion to False so that it waits for the status in the deferred 
task. wait_for_completion = self.wait_for_completion if self.deferrable: wait_for_completion = False query_execution_output = self.hook.execute_query( database=self.database, sql=self.sql, cluster_identifier=self.cluster_identifier, workgroup_name=self.workgroup_name, db_user=self.db_user, parameters=self.parameters, secret_arn=self.secret_arn, statement_name=self.statement_name, with_event=self.with_event, wait_for_completion=wait_for_completion, poll_interval=self.poll_interval, session_id=self.session_id, session_keep_alive_seconds=self.session_keep_alive_seconds, ) # Pull the statement ID, session ID self.statement_id: str = query_execution_output.statement_id if query_execution_output.session_id: context["ti"].xcom_push(key="session_id", value=query_execution_output.session_id) if self.deferrable and self.wait_for_completion: is_finished: bool = self.hook.check_query_is_finished(self.statement_id) if not is_finished: self.defer( timeout=self.execution_timeout, trigger=RedshiftDataTrigger( statement_id=self.statement_id, task_id=self.task_id, poll_interval=self.poll_interval, aws_conn_id=self.aws_conn_id, region_name=self.region_name, verify=self.verify, botocore_config=self.botocore_config, ), method_name="execute_complete", ) # Use the get_sql_results method to return the results of the SQL query, or the statement_ids, # depending on the value of self.return_sql_result return self.get_sql_results(statement_id=self.statement_id, return_sql_result=self.return_sql_result) def execute_complete( self, context: Context, event: dict[str, Any] | None = None ) -> list[GetStatementResultResponseTypeDef] | list[str]: validated_event = validate_execute_complete_event(event) if validated_event["status"] == "error": msg = f"context: {context}, error message: {validated_event['message']}" raise AirflowException(msg) statement_id = validated_event["statement_id"] if not statement_id: raise AirflowException("statement_id should not be empty.") self.log.info("%s completed 
successfully.", self.task_id) # Use the get_sql_results method to return the results of the SQL query, or the statement_ids, # depending on the value of self.return_sql_result return self.get_sql_results(statement_id=statement_id, return_sql_result=self.return_sql_result) def get_sql_results( self, statement_id: str, return_sql_result: bool ) -> list[GetStatementResultResponseTypeDef] | list[str]: """ Retrieve either the result of the SQL query, or the statement ID(s). :param statement_id: Statement ID of the running queries :param return_sql_result: Boolean, true if results should be returned """ # ISSUE-40427: Pull the statement, and check to see if there are sub-statements. If that is the # case, pull each of the sub-statement ID's, and grab the results. Otherwise, just use statement_id statement: DescribeStatementResponseTypeDef = self.hook.conn.describe_statement(Id=statement_id) statement_ids: list[str] = ( [sub_statement["Id"] for sub_statement in statement["SubStatements"]] if len(statement.get("SubStatements", [])) > 0 else [statement_id] ) # If returning the SQL result, use get_statement_result to return the records for each query if return_sql_result: results: list = [self.hook.conn.get_statement_result(Id=sid) for sid in statement_ids] self.log.debug("Statement result(s): %s", results) return results return statement_ids def on_kill(self) -> None: """Cancel the submitted redshift query.""" if hasattr(self, "statement_id"): self.log.info("Received a kill signal.") self.log.info("Stopping Query with statementId - %s", self.statement_id) try: self.hook.conn.cancel_statement(Id=self.statement_id) except Exception as ex: self.log.error("Unable to cancel query. Exiting. %s", ex)
RedshiftDataOperator
python
astropy__astropy
astropy/coordinates/name_resolve.py
{ "start": 798, "end": 1134 }
class ____(ScienceState): """ The URL(s) to Sesame's web-queryable database. """ _value = [ "https://cds.unistra.fr/cgi-bin/nph-sesame/", "http://vizier.cfa.harvard.edu/viz-bin/nph-sesame/", ] @classmethod def validate(cls, value): # TODO: Implement me return value
sesame_url
python
pallets__click
examples/validation/validation.py
{ "start": 219, "end": 1407 }
class ____(click.ParamType): name = "url" def convert(self, value, param, ctx): if not isinstance(value, tuple): value = urlparse.urlparse(value) if value.scheme not in ("http", "https"): self.fail( f"invalid URL scheme ({value.scheme}). Only HTTP URLs are allowed", param, ctx, ) return value @click.command() @click.option( "--count", default=2, callback=validate_count, help="A positive even number." ) @click.option("--foo", help="A mysterious parameter.") @click.option("--url", help="A URL", type=URL()) @click.version_option() def cli(count, foo, url): """Validation. This example validates parameters in different ways. It does it through callbacks, through a custom type as well as by validating manually in the function. """ if foo is not None and foo != "wat": raise click.BadParameter( 'If a value is provided it needs to be the value "wat".', param_hint=["--foo"], ) click.echo(f"count: {count}") click.echo(f"foo: {foo}") click.echo(f"url: {url!r}")
URL
python
pytorch__pytorch
test/distributed/_composable/fsdp/test_fully_shard_mixed_precision.py
{ "start": 16489, "end": 26301 }
class ____(FSDPTestMultiThread): @property def world_size(self) -> int: return 2 @skip_if_lt_x_gpu(1) def test_float16_on_one_submodule(self): x = torch.zeros(2, 100, device=device_type) # Subtest 1: use fp16 on the second child submodule -- does not require # any additional casting logic forward_inputs: dict[str, nn.Module] = {} model = SaveForwardInputsModel( forward_inputs, cast_forward_inputs=False, ).to(device_type) fully_shard(model.c2, mp_policy=MixedPrecisionPolicy(param_dtype=torch.float16)) fully_shard(model) model(x).sum().backward() self.assertEqual(forward_inputs[model].dtype, torch.float32) self.assertEqual(forward_inputs[model.c1].dtype, torch.float32) self.assertEqual(forward_inputs[model.c2].dtype, torch.float16) # Subtest 2: use fp16 on the second child module, where the user module # owns the cast forward_inputs: dict[nn.Module, torch.Tensor] = {} model = SaveForwardInputsModel( forward_inputs=forward_inputs, cast_forward_inputs=True ).to(device_type) fully_shard( model.c2, mp_policy=MixedPrecisionPolicy( param_dtype=torch.float16, cast_forward_inputs=False ), ) fully_shard(model) model(x).sum().backward() self.assertEqual(forward_inputs[model].dtype, torch.float32) self.assertEqual(forward_inputs[model.c1].dtype, torch.float32) self.assertEqual(forward_inputs[model.c2].dtype, torch.float32) # Subtest 3: use fp16 on the first child module and specify its output # dtype so that the second child module does not need to cast forward_inputs: dict[nn.Module, torch.Tensor] = {} model = SaveForwardInputsModel( forward_inputs=forward_inputs, cast_forward_inputs=False ).to(device_type) fully_shard( model.c1, mp_policy=MixedPrecisionPolicy( param_dtype=torch.float16, output_dtype=torch.float32 ), ) fully_shard(model) model(x).sum().backward() self.assertEqual(forward_inputs[model].dtype, torch.float32) self.assertEqual(forward_inputs[model.c1].dtype, torch.float16) self.assertEqual(forward_inputs[model.c2].dtype, torch.float32) @skip_if_lt_x_gpu(1) def 
test_submodules_with_external_inputs(self): self.run_subtests( {"enable_submodule_cast": [False, True]}, self._test_submodules_with_external_inputs, ) def _test_submodules_with_external_inputs(self, enable_submodule_cast: bool): class ToyModule(nn.Module): def __init__(self, forward_inputs: dict[str, torch.Tensor]) -> None: super().__init__() self.l = nn.Linear(100, 100) self.forward_inputs = forward_inputs def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: self.forward_inputs["l2_input_x"] = x self.forward_inputs["l2_input_y"] = y return self.l(x) class ToyModel(nn.Module): def __init__(self, forward_inputs: dict[str, torch.Tensor]) -> None: super().__init__() self.l1 = nn.Linear(100, 100) self.l2 = ToyModule(forward_inputs) self.forward_inputs = forward_inputs def forward(self, x: torch.Tensor) -> torch.Tensor: self.forward_inputs["model_input_x"] = x y = torch.ones( 2, 100, device=device_type.type, dtype=torch.float32 ) # external input return self.l2(self.l1(x), y) forward_inputs: dict[str, torch.Tensor] = {} model = ToyModel(forward_inputs).to(device_type) x = torch.zeros(2, 100, device=device_type.type, dtype=torch.float32) fully_shard( model.l2, mp_policy=MixedPrecisionPolicy( param_dtype=torch.float16, cast_forward_inputs=enable_submodule_cast ), ) fully_shard(model, mp_policy=MixedPrecisionPolicy(param_dtype=torch.float16)) model(x).sum().backward() # If we enable `model.l2` to cast (as default), then `l2_input_y` gets # cast to fp16, and if we disable, then it says as fp32. 
self.assertEqual(forward_inputs["model_input_x"].dtype, torch.float16) self.assertEqual(forward_inputs["l2_input_x"].dtype, torch.float16) self.assertEqual( forward_inputs["l2_input_y"].dtype, torch.float16 if enable_submodule_cast else torch.float32, ) @skip_if_lt_x_gpu(1) @requires_nccl_version((2, 10), "Need NCCL 2.10+ for bf16 collectives") def test_norm_modules_bf16(self): mp_policy = MixedPrecisionPolicy(param_dtype=torch.bfloat16) self._test_norm_modules(mp_policy) @skip_if_lt_x_gpu(1) def test_norm_modules_fp16(self): mp_policy = MixedPrecisionPolicy(param_dtype=torch.float16) self._test_norm_modules(mp_policy) def _test_norm_modules(self, mp_policy: MixedPrecisionPolicy): def inner(model: nn.Module, x: torch.Tensor): # Run forward and backward to check for no type mismatch errors z = model(x) self.assertEqual(z.dtype, mp_policy.param_dtype) z.sum().backward() # Layer norm model = nn.Sequential(nn.Linear(32, 32), nn.LayerNorm(32), nn.Linear(32, 32)) for module in (model[0], model[1], model[2], model): fully_shard(module, mp_policy=mp_policy) inner(model, torch.randn((4, 32))) # Batch norm 1D model = nn.Sequential(nn.Linear(32, 32), nn.BatchNorm1d(32), nn.Linear(32, 32)) for module in (model[0], model[1], model[2], model): fully_shard(module, mp_policy=mp_policy) inner(model, torch.randn((4, 32))) # Batch norm 2D: error in backward from buffer dtype mismatch model = nn.Sequential(nn.Conv2d(1, 5, 3), nn.BatchNorm2d(5), nn.Conv2d(5, 4, 3)) for module in (model[0], model[1], model[2], model): fully_shard(module, mp_policy=mp_policy) if TEST_HPU: inner(model, torch.randn((3, 1, 9, 9))) else: with self.assertRaisesRegex( RuntimeError, "Expected running_mean to have type", # Error not seen on HPUs and hence it can be skipped ): # Errors in batch norm 2D backward inner(model, torch.randn((3, 1, 9, 9))) # Batch norm 2D: cast buffers down to lower precision model = nn.Sequential(nn.Conv2d(1, 5, 3), nn.BatchNorm2d(5), nn.Conv2d(5, 4, 3)) for module in (model[0], 
model[1], model[2], model): fully_shard(module, mp_policy=mp_policy) # Casting batch norm buffers to the lower precision allows backward model[1].running_mean = model[1].running_mean.to(mp_policy.param_dtype) model[1].running_var = model[1].running_var.to(mp_policy.param_dtype) inner(model, torch.randn((3, 1, 9, 9))) # Batch norm 2D: use special mixed precision policy model = nn.Sequential(nn.Conv2d(1, 5, 3), nn.BatchNorm2d(5), nn.Conv2d(5, 4, 3)) bn_mp_policy = MixedPrecisionPolicy(output_dtype=mp_policy.param_dtype) fully_shard(model[1], mp_policy=bn_mp_policy) for module in (model[0], model[2], model): fully_shard(module, mp_policy=mp_policy) inner(model, torch.randn((3, 1, 9, 9))) @skip_if_lt_x_gpu(1) def test_clamp_reduce_dtype(self): # Initialize the model directly in bf16 init_dtype = torch.bfloat16 model = nn.Sequential( nn.Linear(32, 32, dtype=init_dtype), nn.Linear(32, 32, dtype=init_dtype), ).to(device_type.type) mp_policy = MixedPrecisionPolicy( param_dtype=torch.bfloat16, reduce_dtype=torch.bfloat16 ) # Check that we did not clamp the reduce dtype self.assertEqual(mp_policy.reduce_dtype, torch.bfloat16) for module in model: fully_shard((module), mp_policy=mp_policy) fully_shard(model, mp_policy=mp_policy) # Check that the reduce-scatter runs in bf16 even after we change the # model from bf16 to fp32 model.to(torch.float32) orig_reduce_scatter = dist.reduce_scatter_tensor def assert_fn(output: torch.Tensor): self.assertEqual(output.dtype, torch.bfloat16) reduce_scatter = functools.partial( reduce_scatter_with_assert, self, orig_reduce_scatter, assert_fn ) with patch_reduce_scatter(reduce_scatter): inp = torch.randn((4, 32), device=device_type.type) loss = model(inp).sum() loss.backward() @skip_if_lt_x_gpu(1) def test_dataclass_input(self): @dataclasses.dataclass class Input: x: torch.Tensor class Model(nn.Module): def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self._layer = nn.Linear(10, 10) def forward(self, input: 
Input): return self._layer(input.x) mp_policy = MixedPrecisionPolicy( torch.bfloat16, torch.bfloat16, torch.bfloat16, True ) model = Model() inp = Input(torch.randn(2, 10).to(device_type)) fully_shard(model, mp_policy=mp_policy) loss = model(inp).sum() loss.backward() if __name__ == "__main__": run_tests()
TestFullyShardMixedPrecisionCasts
python
openai__openai-python
src/openai/types/responses/response_error_event.py
{ "start": 223, "end": 576 }
class ____(BaseModel): code: Optional[str] = None """The error code.""" message: str """The error message.""" param: Optional[str] = None """The error parameter.""" sequence_number: int """The sequence number of this event.""" type: Literal["error"] """The type of the event. Always `error`."""
ResponseErrorEvent
python
pdm-project__pdm
src/pdm/installers/synchronizers.py
{ "start": 599, "end": 10805 }
class ____(BaseSynchronizer): def install_candidate(self, key: str, progress: Progress) -> Candidate: """Install candidate""" can = self.candidates[key] job = progress.add_task(f"Installing {can.format()}...", text="", total=None) can.prepare(self.environment, RichProgressReporter(progress, job)) try: self.manager.install(can) except Exception: progress.print(f" [error]{termui.Emoji.FAIL}[/] Install {can.format()} failed") raise else: progress.print(f" [success]{termui.Emoji.SUCC}[/] Install {can.format()} successful") finally: progress.remove_task(job) can.prepare(self.environment, CandidateReporter()) return can def update_candidate(self, key: str, progress: Progress) -> tuple[Distribution, Candidate]: """Update candidate""" can = self.candidates[key] dist = self.working_set[strip_extras(key)[0]] dist_version = dist.version job = progress.add_task( f"Updating [req]{key}[/] [warning]{dist_version}[/] -> [warning]{can.version}[/]...", text="", total=None ) can.prepare(self.environment, RichProgressReporter(progress, job)) try: self.manager.overwrite(dist, can) except Exception: progress.print( f" [error]{termui.Emoji.FAIL}[/] Update [req]{key}[/] " f"[warning]{dist_version}[/] " f"-> [warning]{can.version}[/] failed", ) raise else: progress.print( f" [success]{termui.Emoji.SUCC}[/] Update [req]{key}[/] " f"[warning]{dist_version}[/] " f"-> [warning]{can.version}[/] successful", ) finally: progress.remove_task(job) can.prepare(self.environment, CandidateReporter()) return dist, can def remove_distribution(self, key: str, progress: Progress) -> Distribution: """Remove distributions with given names.""" dist = self.working_set[key] dist_version = dist.version job = progress.add_task(f"Removing [req]{key}[/] [warning]{dist_version}[/]...", text="", total=None) try: self.manager.uninstall(dist) except Exception: progress.print( f" [error]{termui.Emoji.FAIL}[/] Remove [req]{key}[/] [warning]{dist_version}[/] failed", ) raise else: progress.print( f" 
[success]{termui.Emoji.SUCC}[/] Remove [req]{key}[/] [warning]{dist_version}[/] successful" ) finally: progress.remove_task(job) return dist def _show_headline(self, packages: dict[str, list[str]]) -> None: add, update, remove = packages["add"], packages["update"], packages["remove"] if not any((add, update, remove)): self.ui.echo("All packages are synced to date, nothing to do.") return results = ["[bold]Synchronizing working set with resolved packages[/]:"] results.extend( [ f"[success]{len(add)}[/] to add,", f"[warning]{len(update)}[/] to update,", f"[error]{len(remove)}[/] to remove", ] ) self.ui.echo(" ".join(results) + "\n") def _show_summary(self, packages: dict[str, list[str]]) -> None: to_add = [self.candidates[key] for key in packages["add"]] to_update = [(self.working_set[key], self.candidates[key]) for key in packages["update"]] to_remove = [self.working_set[key] for key in packages["remove"]] lines = [] if to_add: lines.append("[bold]Packages to add[/]:") for can in to_add: lines.append(f" - {can.format()}") if to_update: lines.append("[bold]Packages to update[/]:") for prev, cur in to_update: lines.append(f" - [req]{cur.name}[/] [warning]{prev.version}[/] -> [warning]{cur.version}[/]") if to_remove: lines.append("[bold]Packages to remove[/]:") for dist in to_remove: lines.append(f" - [req]{dist.metadata['Name']}[/] [warning]{dist.version}[/]") if lines: self.ui.echo("\n".join(lines)) else: self.ui.echo("All packages are synced to date, nothing to do.") def _fix_pth_files(self) -> None: """Remove the .pdmtmp suffix from the installed packages""" from pathlib import Path lib_paths = self.environment.get_paths() for scheme in ["purelib", "platlib"]: if not Path(lib_paths[scheme]).exists(): continue for path in list(Path(lib_paths[scheme]).iterdir()): if path.suffix == ".pdmtmp": target_path = path.with_suffix("") if target_path.exists(): target_path.unlink() path.rename(target_path) def synchronize(self) -> None: to_add, to_update, to_remove = 
self.compare_with_working_set() to_do = {"remove": to_remove, "update": to_update, "add": to_add} if self.dry_run: self._show_summary(to_do) return self._show_headline(to_do) handlers = { "add": self.install_candidate, "update": self.update_candidate, "remove": self.remove_distribution, } sequential_jobs = [] parallel_jobs = [] for kind in to_do: for key in to_do[kind]: if key in self.SEQUENTIAL_PACKAGES or not self.parallel: sequential_jobs.append((kind, key)) elif key in self.candidates and self.candidates[key].req.editable: # Editable packages are installed sequentially. sequential_jobs.append((kind, key)) else: parallel_jobs.append((kind, key)) state = SimpleNamespace(errors=[], parallel_failed=[], sequential_failed=[], jobs=[], mark_failed=False) def update_progress(future: Future, kind: str, key: str) -> None: error = future.exception() status.update_spinner(advance=1) # type: ignore[has-type] if error: exc_info = (type(error), error, error.__traceback__) termui.logger.exception("Error occurs %sing %s: ", kind.rstrip("e"), key, exc_info=exc_info) state.parallel_failed.append((kind, key)) state.errors.extend([f"{kind} [success]{key}[/] failed:\n", *traceback.format_exception(*exc_info)]) if self.fail_fast: for future in state.jobs: future.cancel() state.mark_failed = True # get rich progress and live handler to deal with multiple spinners with InstallationStatus(self.ui, "Synchronizing") as status: for i in range(self.retry_times + 1): status.update_spinner(completed=0, total=len(sequential_jobs) + len(parallel_jobs)) for kind, key in sequential_jobs: try: handlers[kind](key, status.progress) except Exception: termui.logger.exception("Error occurs: ") state.sequential_failed.append((kind, key)) state.errors.extend([f"{kind} [success]{key}[/] failed:\n", traceback.format_exc()]) if self.fail_fast: state.mark_failed = True break finally: status.update_spinner(advance=1) if state.mark_failed: break state.jobs.clear() if parallel_jobs: with ThreadPoolExecutor() as 
executor: for kind, key in parallel_jobs: future = executor.submit(handlers[kind], key, status.progress) future.add_done_callback(functools.partial(update_progress, kind=kind, key=key)) state.jobs.append(future) if ( state.mark_failed or i == self.retry_times or (not state.sequential_failed and not state.parallel_failed) ): break sequential_jobs, state.sequential_failed = state.sequential_failed, [] parallel_jobs, state.parallel_failed = state.parallel_failed, [] state.errors.clear() status.update_spinner(description=f"Retry failed jobs({i + 2}/{self.retry_times + 1})") try: if state.errors: if self.ui.verbosity < termui.Verbosity.DETAIL: status.console.print("\n[error]ERRORS[/]:") status.console.print("".join(state.errors), end="") status.update_spinner(description=f"[error]{termui.Emoji.FAIL}[/] Some package operations failed.") raise InstallationError("Some package operations failed.") if self.install_self: self_key = self.self_key assert self_key self.candidates[self_key] = self.self_candidate word = "a" if self.no_editable else "an editable" status.update_spinner(description=f"Installing the project as {word} package...") if self_key in self.working_set: self.update_candidate(self_key, status.progress) else: self.install_candidate(self_key, status.progress) status.update_spinner(description=f"{termui.Emoji.POPPER} All complete!") finally: # Now we remove the .pdmtmp suffix from the installed packages self._fix_pth_files()
Synchronizer
python
getsentry__sentry
src/sentry/utils/datastructures.py
{ "start": 77, "end": 1785 }
class ____(MutableMapping): """\ An associative data structure in which the ``(key, value)`` pairs form a one-to-one correspondence in both directions. For example, when ``(a, b)`` is added to the mapping, ``b`` can be found when ``a`` is used as a key, and ``a`` can *also* be found when ``b`` is provided to ``get_key``. """ def __init__(self, data): self.__data = data self.__inverse = {v: k for k, v in self.__data.items()} if len(self.__data) != len(self.__inverse): raise ValueError("duplicate value provided") def __getitem__(self, key): return self.__data[key] def __setitem__(self, key, value): if not isinstance(key, Hashable): raise TypeError("key must be hashable") if not isinstance(value, Hashable): raise TypeError("value must be hashable") if value in self.__inverse: raise ValueError("value already present") previous = self.__data.pop(key, __unset__) if previous is not __unset__: assert self.__inverse.pop(previous) == key self.__data[key] = value self.__inverse[value] = key def __delitem__(self, key): del self.__inverse[self.__data.pop(key)] def __iter__(self): return iter(self.__data) def __len__(self) -> int: return len(self.__data) def get_key(self, value, default=__unset__): try: return self.__inverse[value] except KeyError: if default is __unset__: raise else: return default def inverse(self): return self.__inverse.copy()
BidirectionalMapping
python
mahmoud__boltons
boltons/socketutils.py
{ "start": 26146, "end": 28230 }
class ____: """ Reads and writes using the netstring protocol. More info: https://en.wikipedia.org/wiki/Netstring Even more info: http://cr.yp.to/proto/netstrings.txt """ def __init__(self, sock, timeout=DEFAULT_TIMEOUT, maxsize=DEFAULT_MAXSIZE): self.bsock = BufferedSocket(sock) self.timeout = timeout self.maxsize = maxsize self._msgsize_maxsize = len(str(maxsize)) + 1 # len(str()) == log10 def fileno(self): return self.bsock.fileno() def settimeout(self, timeout): self.timeout = timeout def setmaxsize(self, maxsize): self.maxsize = maxsize self._msgsize_maxsize = self._calc_msgsize_maxsize(maxsize) def _calc_msgsize_maxsize(self, maxsize): return len(str(maxsize)) + 1 # len(str()) == log10 def read_ns(self, timeout=_UNSET, maxsize=_UNSET): if timeout is _UNSET: timeout = self.timeout if maxsize is _UNSET: maxsize = self.maxsize msgsize_maxsize = self._msgsize_maxsize else: msgsize_maxsize = self._calc_msgsize_maxsize(maxsize) size_prefix = self.bsock.recv_until(b':', timeout=timeout, maxsize=msgsize_maxsize) try: size = int(size_prefix) except ValueError: raise NetstringInvalidSize('netstring message size must be valid' ' integer, not %r' % size_prefix) if size > maxsize: raise NetstringMessageTooLong(size, maxsize) payload = self.bsock.recv_size(size) if self.bsock.recv(1) != b',': raise NetstringProtocolError("expected trailing ',' after message") return payload def write_ns(self, payload): size = len(payload) if size > self.maxsize: raise NetstringMessageTooLong(size, self.maxsize) data = str(size).encode('ascii') + b':' + payload + b',' self.bsock.send(data)
NetstringSocket
python
walkccc__LeetCode
solutions/2507. Smallest Value After Replacing With Sum of Prime Factors/2507.py
{ "start": 0, "end": 355 }
class ____: def smallestValue(self, n: int) -> int: def getPrimeSum(n: int) -> int: primeSum = 0 for i in range(2, n + 1): while n % i == 0: n //= i primeSum += i return primeSum primeSum = getPrimeSum(n) while n != primeSum: n = primeSum primeSum = getPrimeSum(n) return n
Solution
python
django__django
tests/test_runner_apps/sample/tests_sample.py
{ "start": 361, "end": 551 }
class ____(SimpleTestCase): # Z is used to trick this test case to appear after Vanilla in default # suite def test_sample(self): self.assertEqual(1, 1)
TestZimpleTestCase
python
getsentry__sentry
tests/sentry/workflow_engine/tasks/test_delayed_workflows.py
{ "start": 985, "end": 7159 }
class ____(BaseWorkflowTest, BaseEventFrequencyPercentTest):
    """Shared fixture base for delayed-workflow task tests.

    setUp() builds two event-frequency workflows on the same project (one with
    a slow WHEN condition, one without), one event/group per workflow, and a
    DelayedWorkflowClient primed with both project ids.
    """

    def setUp(self) -> None:
        super().setUp()
        self.workflow1, self.workflow1_if_dcgs = self.create_project_event_freq_workflow(
            self.project, self.environment, has_when_slow_condition=True
        )
        self.workflow2, self.workflow2_if_dcgs = self.create_project_event_freq_workflow(
            self.project
        )
        self.project2 = self.create_project()
        self.environment2 = self.create_environment(project=self.project2)
        self.event1, self.group1 = self.setup_event(self.project, self.environment, "group-1")
        self.event2, self.group2 = self.setup_event(self.project, self.environment, "group-2")
        # Expected Redis hash keys, formatted
        # "workflow:group:when_dcg:if_dcg:if_dcg" (empty when_dcg for workflow2).
        self.workflow_group_dcg_mapping = {
            f"{self.workflow1.id}:{self.group1.id}:{self.workflow1.when_condition_group_id}:{self.workflow1_if_dcgs[0].id}:{self.workflow1_if_dcgs[1].id}",
            f"{self.workflow2.id}:{self.group2.id}::{self.workflow2_if_dcgs[0].id}:{self.workflow2_if_dcgs[1].id}",
        }
        self.detector = Detector.objects.get(project_id=self.project.id, type=ErrorGroupType.slug)
        self.detector_dcg = self.create_data_condition_group()
        self.detector.update(workflow_condition_group=self.detector_dcg)
        self.batch_client = DelayedWorkflowClient()
        self.batch_client.add_project_ids([self.project.id, self.project2.id])

    def create_project_event_freq_workflow(
        self,
        project: Project,
        environment: Environment | None = None,
        has_when_slow_condition: bool = False,
    ) -> tuple[Workflow, list[DataConditionGroup]]:
        """Create a workflow with a slow-percent and a slow-count IF group.

        Returns the workflow plus its two action-filter condition groups.
        """
        # One error detector per project, shared by every workflow we create.
        detector, _ = Detector.objects.get_or_create(
            project_id=project.id, type=ErrorGroupType.slug, defaults={"config": {}}
        )
        workflow_trigger_group = self.create_data_condition_group(
            logic_type=DataConditionGroup.Type.ANY_SHORT_CIRCUIT
        )
        if has_when_slow_condition:
            # Slow (snuba-backed) WHEN condition on the trigger group.
            self.create_data_condition(
                condition_group=workflow_trigger_group,
                type=Condition.EVENT_FREQUENCY_COUNT,
                comparison={"interval": "1h", "value": 100},
                condition_result=True,
            )
        workflow = self.create_workflow(
            when_condition_group=workflow_trigger_group,
            organization=project.organization,
            environment=environment,
        )
        self.create_detector_workflow(
            detector=detector,
            workflow=workflow,
        )
        workflow_action_slow_filter_group = self.create_data_condition_group(
            logic_type=DataConditionGroup.Type.ALL
        )
        self.create_data_condition(
            condition_group=workflow_action_slow_filter_group,
            type=Condition.EVENT_FREQUENCY_PERCENT,
            comparison={"interval": "1h", "value": 100, "comparison_interval": "1w"},
            condition_result=True,
        )
        workflow_action_filter_group = self.create_data_condition_group(
            logic_type=DataConditionGroup.Type.ALL
        )
        self.create_data_condition(
            condition_group=workflow_action_filter_group,
            type=Condition.EVENT_FREQUENCY_COUNT,
            comparison={"interval": "1h", "value": 100},
            condition_result=True,
        )
        self.create_workflow_data_condition_group(
            workflow=workflow, condition_group=workflow_action_filter_group
        )
        self.create_workflow_data_condition_group(
            workflow=workflow, condition_group=workflow_action_slow_filter_group
        )
        return workflow, [workflow_action_slow_filter_group, workflow_action_filter_group]

    def setup_event(self, project, environment, name):
        # Create an event at FROZEN_TIME and return it with its (asserted) group.
        event = self.create_event(project.id, FROZEN_TIME, name, environment.name)
        assert event.group
        return event, event.group

    def push_to_hash(
        self,
        project_id: int,
        workflow_id: int,
        group_id: int,
        when_dcg_id: int | None,
        if_dcgs: list[DataConditionGroup],
        passing_dcgs: list[DataConditionGroup],
        event_id: str | None = None,
        occurrence_id: str | None = None,
        timestamp: datetime | None = None,
    ) -> None:
        """Write one delayed-workflow entry into the project's Redis hash.

        Field format: "workflow:group:when_dcg:if_dcg,...:passing_dcg,...";
        value is a JSON blob with event/occurrence ids and optional timestamp.
        """
        value_dict: dict[str, str | None | datetime] = {
            "event_id": event_id,
            "occurrence_id": occurrence_id,
        }
        if timestamp:
            value_dict["timestamp"] = timestamp
        value = json.dumps(value_dict)
        when_dcg_str = str(when_dcg_id) if when_dcg_id else ""
        field = f"{workflow_id}:{group_id}:{when_dcg_str}:{','.join([str(dcg.id) for dcg in if_dcgs])}:{','.join([str(dcg.id) for dcg in passing_dcgs])}"
        self.batch_client.for_project(project_id).push_to_hash(
            batch_key=None,
            data={field: value},
        )

    def _push_base_events(self, timestamp: datetime | None = None) -> None:
        # Push one hash entry for each of the two fixture workflows.
        workflow_to_data = {
            self.workflow1: (
                self.project,
                self.workflow1.when_condition_group_id,
                [self.workflow1_if_dcgs[0]],
                [self.workflow1_if_dcgs[1]],
                self.event1,
                self.group1,
            ),
            self.workflow2: (
                self.project,
                None,
                [self.workflow2_if_dcgs[0]],
                [self.workflow2_if_dcgs[1]],
                self.event2,
                self.group2,
            ),
        }
        for workflow, (
            project,
            when_condition_group_id,
            if_condition_groups,
            passing_if_groups,
            event,
            group,
        ) in workflow_to_data.items():
            self.push_to_hash(
                project_id=project.id,
                workflow_id=workflow.id,
                group_id=group.id,
                when_dcg_id=when_condition_group_id,
                if_dcgs=if_condition_groups,
                passing_dcgs=passing_if_groups,
                event_id=event.event_id,
                timestamp=timestamp,
            )
TestDelayedWorkflowTaskBase
python
pytorch__pytorch
torch/_dynamo/symbolic_convert.py
{ "start": 8089, "end": 10742 }
class ____:
    """
    SpeculationLog replaces the prior copy_graphstate/restore_graphstate
    checkpointing.  Rather than saving/restoring state, we restart the dynamo
    conversion process over from the beginning -- but when we hit the start
    of the speculation that failed, we instead generate a graph break.
    """

    # Entries recorded on the first analysis pass; replayed (and validated
    # against) on each restart.
    entries: list[SpeculationEntry] = dataclasses.field(default_factory=list)
    # Cursor into `entries` for the current (re)run.
    index: int = 0

    def restart(self) -> None:
        # Replay from the beginning without discarding recorded entries.
        self.index = 0

    def clear(self) -> None:
        # Drop all recorded entries and reset the cursor.
        self.entries.clear()
        self.index = 0

    def next(
        self, filename: str, lineno: int, instruction_pointer: int, inst: Instruction
    ) -> SpeculationEntry:
        """
        Lookup or create a SpeculationEntry() that is shared across
        RestartAnalysis calls.  Args are used only for debug checks.
        """
        # First pass over this index: record a fresh entry.
        if len(self.entries) == self.index:
            self.entries.append(
                SpeculationEntry(filename, lineno, instruction_pointer, inst)
            )
        entry = self.entries[self.index]

        prev_entry_msg = ""
        if self.index != 0:
            prev_entry = self.entries[self.index - 1]
            prev_entry_msg = (
                f"Previous instruction: {prev_entry.filename}:{prev_entry.lineno}"
                f"({prev_entry.inst.opname} @ {prev_entry.instruction_pointer})\n"
            )
        # On a restart, the replayed instruction stream must match what was
        # recorded; any mismatch means the second analysis pass diverged.
        if not (
            entry.instruction_pointer == instruction_pointer
            and entry.filename == filename
            and entry.lineno == lineno
        ):
            raise SpeculationLogDivergence(
                f"""
SpeculationLog diverged at index {self.index} (log had {len(self.entries)} entries):
- Expected: {entry.filename}:{entry.lineno} ({entry.inst.opname} at ip={entry.instruction_pointer})
- Actual: (unknown):{lineno} ({inst.opname} at ip={instruction_pointer})
{prev_entry_msg}
There are two usual reasons why this may have occurred:
- When Dynamo analysis restarted, the second run took a different path than
  the first.  If this occurred, the previous instruction is the critical instruction that
  behaved differently.
- Speculation entries are only added under certain conditions (as seen in
  step()), e.g., there must exist operators in the graph; those conditions may
  have changed on restart.

If this divergence was intentional, clear the speculation log before restarting (do NOT
do this for graph breaks, you will infinite loop).

Otherwise, please submit a bug report, ideally including the contents of
TORCH_LOGS=+dynamo
"""
            )
        self.index += 1
        return entry


@dataclasses.dataclass
SpeculationLog
python
streamlit__streamlit
lib/streamlit/errors.py
{ "start": 19600, "end": 20024 }
class ____(LocalizableStreamlitException):
    """Exception raised when a time string argument is passed that cannot be parsed."""

    def __init__(self, time_string: str) -> None:
        # Bug fix: the two adjacent string literals previously concatenated
        # without a space ("...formatted as`'1d2h34m'`..."); a trailing space
        # is added to the first literal so the rendered message reads correctly.
        super().__init__(
            "Time string doesn't look right. It should be formatted as "
            "`'1d2h34m'` or `2 days`, for example. Got: {time_string}",
            time_string=time_string,
        )
StreamlitBadTimeStringError
python
walkccc__LeetCode
solutions/364. Nested List Weight Sum II/364.py
{ "start": 0, "end": 402 }
class ____:
    def depthSumInverse(self, nestedList: list[NestedInteger]) -> int:
        """Sum of integers weighted by inverse depth (shallower = heavier).

        BFS level by level while keeping a running total of every integer seen
        so far; re-adding that running total once per level gives each integer
        a weight equal to the number of levels at or below it.
        """
        total = 0
        running = 0
        queue = collections.deque(nestedList)
        while queue:
            next_level = collections.deque()
            for item in queue:
                if item.isInteger():
                    running += item.getInteger()
                else:
                    next_level.extend(item.getList())
            # Integers found at shallow levels are counted again on every
            # deeper level, producing the inverse-depth weighting.
            total += running
            queue = next_level
        return total
Solution
python
PyCQA__pylint
tests/functional/a/access/access_to_protected_members.py
{ "start": 5481, "end": 7340 }
class ____:
    """Test for GitHub issue 3066 Accessing of attributes/methods of inner and
    outer classes https://github.com/pylint-dev/pylint/issues/3066

    The trailing "# [protected-access]" comments are expected-message markers
    consumed by pylint's functional-test runner; they must stay attached to
    their statements."""

    attr = 0
    _attr = 1

    @staticmethod
    def _bar(i):
        """Docstring."""

    @staticmethod
    def foobar(i):
        """Test access from outer class"""
        Issue3066._attr = 2
        Issue3066.Aclass._attr = "y"  # [protected-access]
        Issue3066.Aclass.Bclass._attr = "b"  # [protected-access]
        Issue3066._bar(i)
        Issue3066.Aclass._bar(i)  # [protected-access]
        Issue3066.Aclass.Bclass._bar(i)  # [protected-access]

    class Aclass:
        """Inner class for GitHub issue 3066"""

        _attr = "x"

        @staticmethod
        def foobar(i):
            """Test access from inner class"""
            Issue3066._attr = 2  # [protected-access]
            Issue3066.Aclass._attr = "y"
            Issue3066.Aclass.Bclass._attr = "b"  # [protected-access]
            Issue3066._bar(i)  # [protected-access]
            Issue3066.Aclass._bar(i)
            Issue3066.Aclass.Bclass._bar(i)  # [protected-access]

        @staticmethod
        def _bar(i):
            """Docstring."""

        class Bclass:
            """Inner inner class for GitHub issue 3066"""

            _attr = "a"

            @staticmethod
            def foobar(i):
                """Test access from inner inner class"""
                Issue3066._attr = 2  # [protected-access]
                Issue3066.Aclass._attr = "y"  # [protected-access]
                Issue3066.Aclass.Bclass._attr = "b"
                Issue3066._bar(i)  # [protected-access]
                Issue3066.Aclass._bar(i)  # [protected-access]
                Issue3066.Aclass.Bclass._bar(i)

            @staticmethod
            def _bar(i):
                """Docstring."""
Issue3066
python
encode__django-rest-framework
tests/test_filters.py
{ "start": 603, "end": 1583 }
class ____(SimpleTestCase):
    """Behavioural checks for filters.search_smart_split."""

    @staticmethod
    def _split(term):
        # Materialise the generator so results can be compared directly.
        return list(filters.search_smart_split(term))

    def test_keep_quoted_together_regardless_of_commas(self):
        assert self._split('"hello, world"') == ['hello, world']

    def test_strips_commas_around_quoted(self):
        assert self._split(',,"hello, world"') == ['hello, world']
        assert self._split(',,"hello, world",,') == ['hello, world']
        assert self._split('"hello, world",,') == ['hello, world']

    def test_splits_by_comma(self):
        assert self._split(',,hello, world') == ['hello', 'world']
        assert self._split(',,hello, world,,') == ['hello', 'world']
        assert self._split('hello, world,,') == ['hello', 'world']

    def test_splits_quotes_followed_by_comma_and_sentence(self):
        # An unterminated quote pair is not treated as one token.
        assert self._split('"hello, world",found') == ['"hello', 'world"', 'found']
SearchSplitTests
python
tensorflow__tensorflow
tensorflow/python/ops/math_ops_test.py
{ "start": 27577, "end": 36449 }
class ____(test_util.TensorFlowTestCase):
  """Checks TF's div/mod family against numpy references, including the
  INT_MIN / -1 overflow edge cases numpy itself cannot express."""

  # TODO(aselle): Test more types before exposing new division operators.

  def intTestData(self):
    # Broadcastable int grids: 20x1 numerators against 1x4 (odd) divisors.
    nums = np.arange(-10, 10, 1).reshape(20, 1)
    divs = np.arange(-3, 4, 2).reshape(1, 4)
    return nums, divs

  def floatTestData(self):
    # Broadcastable float grids; divisors are strictly negative.
    nums = np.arange(-10, 10, .25).reshape(80, 1)
    divs = np.arange(-3, 0, .25).reshape(1, 12)
    return nums, divs

  def numpySafeFloorDivInt(self, x, y):
    z = x // y
    # Numpy produces 0 for INT_MIN/-1, but we expect an overflow to INT_MIN
    # so that (INT_MIN/-1) + (INT_MIN % -1) = INT_MIN + 0 = INT_MIN.
    z[(x == np.iinfo(x.dtype).min) & (y == -1)] = np.iinfo(x.dtype).min
    return z

  def numpySafeFloorModInt(self, x, y):
    # Numpy crashes with a FPE for INT_MIN % -1; derive mod from the safe div.
    z = self.numpySafeFloorDivInt(x, y)
    return x - z * y

  def numpySafeTruncateDivInt(self, x, y):
    z = self.numpySafeFloorDivInt(x, y)
    # Round up if non-zero remainder and inputs have opposite signs.
    z[(x != z * y) & ((x < 0) != (y < 0))] += 1
    return z

  def numpySafeTruncateModInt(self, x, y):
    # Numpy crashes with a FPE for INT_MIN % -1; derive mod from the safe div.
    z = self.numpySafeTruncateDivInt(x, y)
    return x - z * y

  def testFloorModInt(self):
    nums, divs = self.intTestData()
    for dtype in [np.int32, np.int64]:
      x = nums.astype(dtype)
      y = divs.astype(dtype)
      tf_result = math_ops.floormod(x, y)
      np_result = self.numpySafeFloorModInt(x, y)
      self.assertAllEqual(tf_result, np_result)
      # The % operator overload must agree with the op.
      tf2_result = (array_ops.constant(x) % array_ops.constant(y))
      self.assertAllEqual(tf2_result, tf_result)

  def testFloorModFloat(self):
    nums, divs = self.floatTestData()
    for dtype in [np.float16, np.float32, np.float64]:
      x = nums.astype(dtype)
      y = divs.astype(dtype)
      tf_result = math_ops.floormod(x, y)
      np_result = x % y
      self.assertAllEqual(tf_result, np_result)
      tf2_result = (array_ops.constant(x) % array_ops.constant(y))
      self.assertAllEqual(tf2_result, tf_result)

  def testFloorModBfloat16(self):
    nums, divs = self.floatTestData()
    tf_result = math_ops.floormod(
        math_ops.cast(nums, dtypes.bfloat16),
        math_ops.cast(divs, dtypes.bfloat16))
    np_result = nums % divs
    self.assertAllEqual(tf_result, np_result)

  def testTruncateModInt(self):
    nums, divs = self.intTestData()
    tf_result = math_ops.truncatemod(nums, divs)
    np_result = np.fmod(nums, divs)
    self.assertAllEqual(tf_result, np_result)

  def testTruncateModFloat(self):
    nums, divs = self.floatTestData()
    tf_result = math_ops.truncatemod(nums, divs)
    np_result = np.fmod(nums, divs)
    self.assertAllEqual(tf_result, np_result)

  def testFloorDivideInt(self):
    nums, divs = self.intTestData()
    tf_result = math_ops.floor_div(nums, divs)
    np_result = self.numpySafeFloorDivInt(nums, divs)
    self.assertAllEqual(tf_result, np_result)
    # The // operator overload must agree with the op.
    tf2_result = (array_ops.constant(nums) // array_ops.constant(divs))
    self.assertAllEqual(tf2_result, tf_result)

  def testTruncateDivideInt(self):
    nums, divs = self.intTestData()
    tf_result = math_ops.truncatediv(nums, divs)
    np_result = self.numpySafeTruncateDivInt(nums, divs)
    self.assertAllEqual(tf_result, np_result)

  def testTruncateDivideFloat(self):
    nums, divs = self.floatTestData()
    tf_result = math_ops.truncatediv(nums, divs)
    np_result = np.trunc(nums / divs)
    self.assertAllEqual(tf_result, np_result)

  @test_util.deprecated_graph_mode_only
  def testDivideName(self):
    # Graph-mode only: checks the user-supplied op name is honored.
    op = math_ops.divide(
        array_ops.constant(3), array_ops.constant(4), name="my_cool_divide")
    self.assertEqual(op.name, "my_cool_divide:0")

  def testRealDiv(self):
    nums, divs = self.floatTestData()
    tf_result = math_ops.realdiv(nums, divs)
    np_result = np.divide(nums, divs)
    self.assertAllClose(tf_result, np_result)

  def testDivideType(self):
    a = array_ops.constant([2], dtype=dtypes.int32)
    # Since __future__.division is in effect, we should always upgrade to
    # float64.
    b = math_ops.divide(a, 1)
    self.assertEqual(b.dtype, dtypes.float64)
    self.assertEqual(2.0, self.evaluate(b))
    c = math_ops.divide(a, 4)
    self.assertEqual(c.dtype, dtypes.float64)
    self.assertEqual(0.5, self.evaluate(c))

  def testComplexDiv(self):
    # Smoke test: complex inputs should not raise.
    foo = array_ops.constant([1. + 3.j])
    _ = math_ops.divide(foo, 1.)
    _ = math_ops.div(foo, 2.)

  def testFloorDivGrad(self):
    a = variables.Variable(2.)
    b = variables.Variable(4.)
    input_vars = [a, b]
    self.evaluate(variables.global_variables_initializer())
    if context.executing_eagerly():
      # TODO(rmlarsen): Is there a more compact way of
      # writing this for multiple expressions?
      with backprop.GradientTape() as tape:
        tape.watch(input_vars)
        c_grad0 = tape.gradient(math_ops.divide(a, b), input_vars)
      with backprop.GradientTape() as tape:
        tape.watch(input_vars)
        c_grad1 = tape.gradient(math_ops.div(a, b), input_vars)
      with backprop.GradientTape() as tape:
        tape.watch(input_vars)
        c_grad2 = tape.gradient(math_ops.floordiv(a, b), input_vars)
    else:
      c_grad0 = gradients.gradients(math_ops.divide(a, b), input_vars)
      c_grad1 = gradients.gradients(math_ops.div(a, b), input_vars)
      c_grad2 = gradients.gradients(math_ops.floordiv(a, b), input_vars)
    self.assertAllEqual([self.evaluate(x) for x in c_grad0], [.25, -.125])
    self.assertAllEqual([self.evaluate(x) for x in c_grad1], [.25, -.125])
    # floordiv is piecewise constant, so its gradient is None.
    self.assertAllEqual(
        [None if x is None else self.evaluate(x) for x in c_grad2],
        [None, None])

  def testConsistent(self):
    nums, divs = self.intTestData()
    # floor_div and floormod must jointly reconstruct the numerator.
    tf_result = (
        math_ops.floor_div(nums, divs) * divs + math_ops.floormod(nums, divs))
    tf_nums = array_ops.constant(nums)
    tf_divs = array_ops.constant(divs)
    tf2_result = (tf_nums // tf_divs * tf_divs + tf_nums % tf_divs)
    np_result = (nums // divs) * divs + (nums % divs)
    # Consistent with numpy
    self.assertAllEqual(tf_result, np_result)
    # Consistent with two forms of divide
    self.assertAllEqual(tf_result, tf2_result)
    # consistency for truncation form
    tf3_result = (
        math_ops.truncatediv(nums, divs) * divs +
        math_ops.truncatemod(nums, divs))
    expanded_nums = np.reshape(
        np.tile(nums, divs.shape[1]), (nums.shape[0], divs.shape[1]))
    # Consistent with desire to get numerator
    self.assertAllEqual(tf3_result, expanded_nums)
    # Consistent with desire to get numerator
    self.assertAllEqual(tf_result, expanded_nums)

  def testWithPythonValue(self):
    # Test case for https://github.com/tensorflow/tensorflow/issues/39475
    x = math_ops.divide(5, 2)
    self.assertIsInstance(x, tensor_lib.Tensor)
    x = math_ops.divide(5, array_ops.constant(2.0))
    self.assertIsInstance(x, tensor_lib.Tensor)

  def intEdgeTestData(self, dtype):
    """Edge-case test data for integer types."""
    # INT_MIN/-1 will produce signed-integer overflow, so we instead test
    # (INT_MIN + 1) / -1.
    nums = np.array(
        [
            [np.iinfo(dtype).min, -1, 1, np.iinfo(dtype).max],
            [np.iinfo(dtype).min + 1, -1, 1, np.iinfo(dtype).max],
            [np.iinfo(dtype).min, -1, 1, np.iinfo(dtype).max],
            [np.iinfo(dtype).min, -1, 1, np.iinfo(dtype).max],
        ],
        dtype=dtype,
    )
    divs = np.array(
        [
            [
                np.iinfo(dtype).min,
                np.iinfo(dtype).min,
                np.iinfo(dtype).min,
                np.iinfo(dtype).min,
            ],
            [-1, -1, -1, -1],
            [1, 1, 1, 1],
            [
                np.iinfo(dtype).max,
                np.iinfo(dtype).max,
                np.iinfo(dtype).max,
                np.iinfo(dtype).max,
            ],
        ],
        dtype=dtype,
    )
    return nums, divs

  def testFloorDivModIntEdges(self):
    for dtype in [np.int32, np.int64]:
      x, y = self.intEdgeTestData(dtype)
      tf_floor_div = math_ops.floor_div(x, y)
      np_floor_div = self.numpySafeFloorDivInt(x, y)
      self.assertAllEqual(tf_floor_div, np_floor_div)
      tf_floor_mod = math_ops.floormod(x, y)
      np_floor_mod = self.numpySafeFloorModInt(x, y)
      self.assertAllEqual(tf_floor_mod, np_floor_mod)

  def testTruncateDivModIntEdges(self):
    for dtype in [np.int32, np.int64]:
      x, y = self.intEdgeTestData(dtype)
      tf_truncate_div = math_ops.truncatediv(x, y)
      np_truncate_div = self.numpySafeTruncateDivInt(x, y)
      self.assertAllEqual(tf_truncate_div, np_truncate_div)
      tf_truncate_mod = math_ops.truncatemod(x, y)
      np_truncate_mod = self.numpySafeTruncateModInt(x, y)
      self.assertAllEqual(tf_truncate_mod, np_truncate_mod)


@test_util.run_all_in_graph_and_eager_modes
DivAndModTest
python
huggingface__transformers
src/transformers/models/qwen2_moe/modular_qwen2_moe.py
{ "start": 6703, "end": 6968 }
class ____(MixtralPreTrainedModel): _can_record_outputs = { "router_logits": OutputRecorder(Qwen2MoeTopKRouter, index=0), "hidden_states": Qwen2MoeDecoderLayer, "attentions": Qwen2MoeAttention, } @auto_docstring
Qwen2MoePreTrainedModel
python
huggingface__transformers
src/transformers/models/sam/configuration_sam.py
{ "start": 780, "end": 2838 }
class ____(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`SamPromptEncoder`]. The [`SamPromptEncoder`]
    module is used to encode the input 2D points and bounding boxes. Instantiating a configuration defaults will yield
    a similar configuration to that of the SAM-vit-h
    [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden states.
        image_size (`int`, *optional*, defaults to 1024):
            The expected output resolution of the image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        mask_input_channels (`int`, *optional*, defaults to 16):
            The number of channels to be fed to the `MaskDecoder` module.
        num_point_embeddings (`int`, *optional*, defaults to 4):
            The number of point embeddings to be used.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the encoder and pooler.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
    """

    base_config_key = "prompt_encoder_config"

    def __init__(
        self,
        hidden_size=256,
        image_size=1024,
        patch_size=16,
        mask_input_channels=16,
        num_point_embeddings=4,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.image_size = image_size
        self.patch_size = patch_size
        # Derived: number of patches along each image side.
        self.image_embedding_size = image_size // patch_size
        self.mask_input_channels = mask_input_channels
        self.num_point_embeddings = num_point_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
SamPromptEncoderConfig
python
getsentry__sentry
src/sentry/seer/similarity/utils.py
{ "start": 3338, "end": 3644 }
class ____(StrEnum): INGEST = "ingest" BACKFILL = "backfill" DELETION = "deletion" SIMILAR_ISSUES_TAB = "similar_issues_tab" def _get_value_if_exists(exception_value: Mapping[str, Any]) -> str: return exception_value["values"][0] if exception_value.get("values") else ""
ReferrerOptions
python
doocs__leetcode
solution/2500-2599/2572.Count the Number of Square-Free Subsets/Solution.py
{ "start": 0, "end": 729 }
class ____:
    def squareFreeSubsets(self, nums: List[int]) -> int:
        """Count non-empty subsets whose product is square-free, mod 1e9+7.

        Bitmask DP over the 10 primes <= 30: f[state] counts subsets whose
        combined prime-factor mask is exactly `state`; 1s may join any subset,
        contributing a factor of 2**cnt[1].

        Bug fix vs. original: the result is now computed as
        (sum(f) - 1) % mod instead of sum(f) % mod - 1, which could return -1
        (or an unreduced value) when the total was a multiple of mod.
        """
        primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
        cnt = Counter(nums)
        mod = 10**9 + 7
        n = len(primes)
        f = [0] * (1 << n)
        # Any number of 1s can join any subset without changing its mask;
        # reduce modularly up front (equivalent mod p, avoids huge ints).
        f[0] = pow(2, cnt[1], mod)
        for x in range(2, 31):
            # Skip absent values and those divisible by a square (4, 9, 25).
            if cnt[x] == 0 or x % 4 == 0 or x % 9 == 0 or x % 25 == 0:
                continue
            mask = 0
            for i, p in enumerate(primes):
                if x % p == 0:
                    mask |= 1 << i
            # Iterate states downward so each value x is incorporated at most
            # once per resulting state (0/1-knapsack over prime masks).
            for state in range((1 << n) - 1, 0, -1):
                if state & mask == mask:
                    f[state] = (f[state] + cnt[x] * f[state ^ mask]) % mod
        # Exclude the empty subset; keep the answer reduced into [0, mod).
        return (sum(f) - 1) % mod
Solution
python
spyder-ide__spyder
spyder/plugins/profiler/widgets/main_widget.py
{ "start": 1408, "end": 1503 }
class ____:
    """Section identifiers for the profiler widget's context menu."""

    # Identifier for the "locals" section.
    Locals = 'locals_section'
    # Identifier for the remaining ("other") section.
    Other = "other_section"
ProfilerContextMenuSections
python
google__jax
tests/hijax_test.py
{ "start": 1645, "end": 4493 }
class ____(HiType):
  """Hi-level type for a quantized array: int8 values plus per-row scales."""

  # (m, k) logical shape of the quantized matrix.
  shape: tuple[int, int]

  # how to lower to (lo)jax types
  def lo_ty(self) -> list[ShapedArray]:
    m, k = self.shape
    # One int8 value array plus one float32 scale per row.
    return [ShapedArray((m, k), jnp.dtype('int8')),
            ShapedArray((m, ), jnp.dtype('float32'))]

  # these next two are essentially the pytree interface
  def lower_val(self, hi_val: QArray) -> list[jax.Array]:
    return [hi_val.arr, hi_val.scale]

  def raise_val(self, arr, scale) -> QArray:
    return QArray(arr, scale)

  # alternative: LowerTrace
  def ref_get_abstract_eval(self, ref_aval, *args, tree):
    # Delegate to get_p's abstract eval on a float32 stand-in, then re-wrap
    # the resulting ShapedArray as a QArrayTy.
    arr_aval = core.ShapedArray(self.shape, jnp.dtype('float32'))
    updated_ref = ref_aval.update(inner_aval=arr_aval)
    out, effects = state_primitives.get_p.abstract_eval(
        updated_ref, *args, tree=tree
    )
    assert isinstance(out, core.ShapedArray)
    return QArrayTy(out.shape), effects

  def ref_swap_abstract_eval(self, ref_aval, val_aval, *args, tree):
    # Same stand-in trick as ref_get_abstract_eval, but for swap_p.
    arr_aval = core.ShapedArray(self.shape, jnp.dtype('float32'))
    val_arr_aval = core.ShapedArray(val_aval.shape, jnp.dtype('float32'))
    updated_ref = ref_aval.update(inner_aval=arr_aval)
    out_aval, effects = state_primitives.swap_p.abstract_eval(
        updated_ref, val_arr_aval, *args, tree=tree
    )
    assert isinstance(out_aval, core.ShapedArray)
    return QArrayTy(out_aval.shape), effects

  def ref_get_to_lojax(self, ref: state.TransformedRef | jax.Ref,
                       idx: indexing.NDIndexer):
    if isinstance(ref, state.TransformedRef):
      if ref.transforms:
        raise NotImplementedError(ref)
      ref = ref.ref
    # Unpack Ref type
    ref = ref._refs
    # Only full-array reads are supported; partial indexing is unimplemented.
    if not all(i.start == 0 and i.size == s
               for i, s in zip(idx.indices, ref.arr.shape)):
      raise NotImplementedError
    outs = [out.get() for out in self.lower_val(ref)]
    return self.raise_val(*outs)

  def ref_swap_to_lojax(self, ref: state.TransformedRef | jax.Ref,
                        val: jax.Array, idx: indexing.NDIndexer):
    if isinstance(ref, state.TransformedRef):
      if ref.transforms:
        raise NotImplementedError(ref)
      ref = ref.ref
    # Unpack Ref type
    ref = ref._refs
    # Only full-array swaps are supported; partial indexing is unimplemented.
    if not all(i.start == 0 and i.size == s
               for i, s in zip(idx.indices, ref.arr.shape)):
      raise NotImplementedError
    outs = [out.swap(val) for out, val in
            zip(self.lower_val(ref), self.lower_val(val))]
    return self.raise_val(*outs)

  # autodiff
  def to_tangent_aval(self):
    return self  # different from what a pytree would do!

  def vspace_zero(self):
    m, k = self.shape
    # Zero int8 values with unit scales.
    return QArray(jnp.zeros((m, k), jnp.dtype('int8')),
                  jnp.ones((m, ), jnp.dtype('float32')))


register_hitype(QArray, lambda q: QArrayTy(q.arr.shape))


def to_qarray(x):
  # Bind the quantize primitive.
  return to_qarray_p.bind(x)


def from_qarray(x):
  # Bind the dequantize primitive.
  return from_qarray_p.bind(x)
QArrayTy
python
PrefectHQ__prefect
tests/server/models/test_artifacts.py
{ "start": 13790, "end": 19289 }
class ____:
    """Tests for models.artifacts.read_latest_artifacts filtering/sorting."""

    @pytest.fixture
    async def artifacts(self, session, flow_run, task_run):
        # Two versions under "key-1" so "latest"-resolution can be asserted
        # (data=2 must win); "key-3" has no flow/task run association.
        artifacts = [
            schemas.core.Artifact(
                key="key-1",
                data=1,
                type="markdown",
                flow_run_id=flow_run.id,
                description="Some info about my artifact",
            ),
            schemas.core.Artifact(
                key="key-1",
                data=2,
                type="markdown",
                flow_run_id=flow_run.id,
                description="Some info about my artifact",
            ),
            schemas.core.Artifact(
                key="key-2",
                data=3,
                type="table",
                flow_run_id=flow_run.id,
                task_run_id=task_run.id,
                description="Some info about my artifact",
            ),
            schemas.core.Artifact(
                key="key-3",
                data=4,
                type="table",
                description="Some info about my artifact",
            ),
        ]
        for artifact_schema in artifacts:
            await models.artifacts.create_artifact(
                session=session,
                artifact=artifact_schema,
            )
        return artifacts

    async def test_read_latest_artifacts(
        self,
        artifacts,
        session,
    ):
        read_artifacts = await models.artifacts.read_latest_artifacts(
            session=session,
        )
        # Only the newest version of "key-1" is returned (data=2, not 1).
        assert len(read_artifacts) == 3
        assert {a.key for a in read_artifacts} == {"key-1", "key-2", "key-3"}
        assert {a.data for a in read_artifacts} == {2, 3, 4}

    async def test_read_latest_artifacts_with_artifact_type_filter(
        self,
        artifacts,
        session,
    ):
        read_artifacts = await models.artifacts.read_latest_artifacts(
            session=session,
            artifact_filter=schemas.filters.ArtifactCollectionFilter(
                type=schemas.filters.ArtifactCollectionFilterType(any_=["table"]),
            ),
        )
        assert len(read_artifacts) == 2
        assert {a.key for a in read_artifacts} == {"key-2", "key-3"}
        assert {a.data for a in read_artifacts} == {3, 4}

    async def test_read_latest_artifacts_with_artifact_key_filter(
        self,
        artifacts,
        session,
    ):
        read_artifacts = await models.artifacts.read_latest_artifacts(
            session=session,
            artifact_filter=schemas.filters.ArtifactCollectionFilter(
                key=schemas.filters.ArtifactCollectionFilterKey(any_=["key-1"]),
            ),
        )
        assert len(read_artifacts) == 1
        # latest_id must point at the second (newest) "key-1" version.
        assert artifacts[1].id == read_artifacts[0].latest_id

    async def test_read_latest_artifacts_with_flow_run_id_filter(
        self,
        artifacts,
        session,
    ):
        read_artifacts = await models.artifacts.read_latest_artifacts(
            session=session,
            artifact_filter=schemas.filters.ArtifactCollectionFilter(
                flow_run_id=schemas.filters.ArtifactCollectionFilterFlowRunId(
                    any_=[artifacts[0].flow_run_id]
                ),
            ),
            sort=schemas.sorting.ArtifactCollectionSort.KEY_ASC,
        )
        assert len(read_artifacts) == 2
        assert artifacts[1].id == read_artifacts[0].latest_id
        assert artifacts[2].id == read_artifacts[1].latest_id

    async def test_read_latest_artifacts_with_task_run_id_filter(
        self,
        artifacts,
        session,
    ):
        read_artifacts = await models.artifacts.read_latest_artifacts(
            session=session,
            artifact_filter=schemas.filters.ArtifactCollectionFilter(
                task_run_id=schemas.filters.ArtifactCollectionFilterTaskRunId(
                    any_=[artifacts[2].task_run_id]
                ),
            ),
        )
        assert len(read_artifacts) == 1
        assert artifacts[2].id == read_artifacts[0].latest_id

    async def test_read_latest_artifacts_with_limit(
        self,
        artifacts,
        session,
    ):
        read_artifacts = await models.artifacts.read_latest_artifacts(
            session=session,
            limit=1,
            sort=schemas.sorting.ArtifactCollectionSort.KEY_DESC,
            artifact_filter=schemas.filters.ArtifactCollectionFilter(),
        )
        assert len(read_artifacts) == 1
        assert read_artifacts[0].key == artifacts[-1].key

    async def test_reading_latest_artifacts_by_flow_name(self, flow_artifacts, session):
        flow_name = flow_artifacts[0].name
        result = await models.artifacts.read_latest_artifacts(
            session=session,
            flow_filter=schemas.filters.FlowFilter(
                name=schemas.filters.FlowFilterName(any_=[flow_name])
            ),
        )
        assert len(result) == 1
        assert result[0].latest_id == flow_artifacts[2].id

    async def test_reading_latest_artifacts_by_deployment(
        self, deployment_artifacts, session
    ):
        deployment_id = deployment_artifacts[0].deployment_id
        result = await models.artifacts.read_latest_artifacts(
            session=session,
            deployment_filter=schemas.filters.DeploymentFilter(
                id=schemas.filters.DeploymentFilterId(any_=[deployment_id])
            ),
        )
        assert len(result) == 1
        assert result[0].latest_id == deployment_artifacts[2].id
python
google__jax
jax/_src/pallas/mosaic_gpu/pipeline.py
{ "start": 17247, "end": 40303 }
class ____(Protocol): """Protocol for a warp specialized pipeline.""" def __call__( self, *gmem_refs: Any, allocations: Any | None = None, ) -> None: ... def get_allocations(self, *gmem_refs: Any) -> Any: ... def emit_pipeline_warp_specialized( body: Callable[..., None], *, grid: pallas_core.TupleGrid, memory_registers: int, in_specs: BlockSpecPytree = (), out_specs: BlockSpecPytree = (), max_concurrent_steps: int = 2, wg_axis: str, num_compute_wgs: int, pipeline_state: jax.Array | PipelinePipeline | None = None, manual_consumed_barriers: bool = False, compute_context: ComputeContext | None = None, memory_thread_idx: int | None = None, ) -> WarpSpecializedPipeline: """Creates a function to emit a warp-specialized pipeline. The ``body`` function should have the following signature (without carry). ``consumed_barriers`` is an optional argument that is only passed if the ``manual_consumed_barriers`` argument is True:: def body(indices, *input_refs, *output_refs, *consumed_barriers) -> None: or with a carries enabled (enabled via the ``compute_context`` argument), where the body returns the next carry:: def body( indices, *input_refs, *output_refs, *consumed_barriers, carry ) -> Carry: When ``manual_consumed_barriers`` is True, the user must arrive on all the consumed barriers from all compute warpgroups at each pipeline step. Args: body: The pipeline body. grid: The grid to use for the pipeline. memory_registers: The number of registers to reserve for the memory thread. For H100 GPUs, 40 is a reasonable value. in_specs: The block specs for the inputs. out_specs: The block specs for the outputs. max_concurrent_steps: The maximum number of sequential stages that are active concurrently. Defaults to 2. wg_axis: The axis name for the warp group axis. num_compute_wgs: The number of compute warpgroups manual_consumed_barriers: If True, consumed barriers will be passed into the body function after the output refs. 
There will be one barrier per input and will be passed in the same order. compute_context: If specified, enables carries in the pipeline and allows a user-specified prologue/epilogue that is only executed in the compute thread. The signature of the pipeline body function will be modified such that the last argument will be the current carry and it must return the next carry. The compute_context itself should follow the signature of `ComputeContext` and take a pipeline function as its sole argument. Calling the pipeline with the initial carry will run the pipeline and return the final carry. memory_thread_idx: The index of the memory thread. If not specified, defaults to the last thread. pipeline_state: If multiple pipelines that have almost the same parameters (only in/out_specs and body can differ) are going to be evaluated in sequence, this argument can be used to avoid pipeline bubbles between their invocations. The first pipeline in the sequence should use the ``START`` state, followed by an arbitrary number of ``STEADY`` states, followed by a single ``STOP`` state. Note that until the pipeline with ``STOP`` is done, the memory thread will not wait for the compute threads to complete and fully consume their work. Any modification of their operands other than invoking another pipeline is disallowed. Important: To achieve bubble-free execution, it is important to also use the manual allocation mode by calling ``get_allocations`` on the returned function, passing the result to ``pl.run_scoped`` and the provided results to the returned function as an ``allocations`` keyword argument. Otherwise, the pipeline function will perform the scoped allocation itself which can lead to synchronization that can still cause pipeline bubbles. """ # TODO(justinfu): Factor out common code between warp-specialized and # normal pipelines. 
if not isinstance(in_specs, (list, tuple)): in_specs = (in_specs,) if not isinstance(out_specs, (list, tuple)): out_specs = (out_specs,) if isinstance(in_specs, list): in_specs = tuple(in_specs) if isinstance(out_specs, list): out_specs = tuple(out_specs) flat_in_specs, in_specs_treedef = jax.tree.flatten(in_specs) flat_in_specs = tuple(map(_downcast_spec, flat_in_specs)) for spec in flat_in_specs: if len(spec.collective_axes) > 1: raise ValueError( "Only a single collective axis supported in input BlockSpecs, but" f" got {spec.collective_axes}" ) collective_axes = tuple(frozenset( a for spec in flat_in_specs for a in spec.collective_axes )) flat_out_specs, out_specs_treedef = jax.tree.flatten(out_specs) flat_out_specs = tuple(map(_downcast_spec, flat_out_specs)) for spec in flat_out_specs: if spec.collective_axes: raise ValueError("Output BlockSpecs cannot have collective_axes") delay_release = None for in_spec in in_specs: if not isinstance(in_spec, gpu_core.BlockSpec): delay_release = 0 continue delay_release = in_spec.delay_release if in_spec.delay_release != delay_release: raise NotImplementedError( "All inputs must have the same delay_release, but" f" {in_spec.delay_release=} != {delay_release=}" ) delay_release = delay_release or 0 if max_concurrent_steps <= delay_release: raise ValueError( "max_concurrent_steps must be greater than delay_release, but" f" {max_concurrent_steps=}, {delay_release=}" ) if memory_thread_idx is None: memory_thread_idx = num_compute_wgs if memory_thread_idx != num_compute_wgs: # TODO(justinfu): Indexing calculations for buffers assume the memory # thread is the last thread. raise NotImplementedError("Memory thread must be the last thread.") has_carry = compute_context is not None # Trace the index maps to determine if they depend on the grid. # Grid-independent values will not be multiple-buffered. 
in_spec_has_seq_axis = [ not _is_index_invariant(spec, grid) for spec in flat_in_specs] out_spec_has_seq_axis = [ not _is_index_invariant(spec, grid) for spec in flat_out_specs] spec_has_seq_axis = [*in_spec_has_seq_axis, *out_spec_has_seq_axis] if not all(in_spec_has_seq_axis): raise NotImplementedError("Only inputs with a dependency on the grid are supported.") num_steps = math.prod(grid) has_dynamic_grid = not isinstance(num_steps, int) def _get_slot(step, has_seq_dim): """Returns the buffer slot given the pipeline step.""" if has_seq_dim: return step else: return 0 # Shrink ``max_concurrent_steps`` if the total number of steps is lower to # reduce the size of the refs allocated in SMEM. if not has_dynamic_grid and max_concurrent_steps > num_steps: max_concurrent_steps = cast(int, num_steps) def _get_scoped_allocs(*gmem_refs: AbstractRefPytree): in_gmem_refs = gmem_refs[:len(in_specs)] out_gmem_refs = gmem_refs[len(in_specs):] flat_in_gmem_refs, in_gmem_refs_treedef = jax.tree.flatten(in_gmem_refs) flat_out_gmem_refs, out_gmem_refs_treedef = jax.tree.flatten(out_gmem_refs) if in_specs_treedef != in_gmem_refs_treedef: raise ValueError( "Input specs and input gmem refs must have the same pytree structure." f" {in_specs_treedef} != {in_gmem_refs_treedef}" ) if out_specs_treedef != out_gmem_refs_treedef: raise ValueError( "Output specs and output gmem refs must have the same pytree structure." 
f" {out_specs_treedef} != {out_gmem_refs_treedef}" ) flat_gmem_refs = [*flat_in_gmem_refs, *flat_out_gmem_refs] smem_allocs = [] for spec, has_seq_dim, gmem_ref in zip( it.chain(flat_in_specs, flat_out_specs), spec_has_seq_axis, flat_gmem_refs): slots = max_concurrent_steps if has_seq_dim else 1 smem_allocs.append( gpu_core.SMEM( (slots, *spec.block_shape), # type: ignore gmem_ref.dtype, transforms=getattr(spec, "transforms", ()), ) ) flat_in_smem_refs, flat_out_smem_refs = util.split_list( smem_allocs, [len(flat_in_specs)]) in_smem_barrier = gpu_core.Barrier(num_arrivals=len(flat_in_specs), num_barriers=max_concurrent_steps) flat_consumed_barriers = [] consumed_barrier_type: Any if collective_axes: consumed_barrier_type = functools.partial( gpu_core.ClusterBarrier, collective_axes=collective_axes # type: ignore ) else: consumed_barrier_type = gpu_core.Barrier for _ in flat_in_specs: if manual_consumed_barriers: flat_consumed_barriers.append( consumed_barrier_type( num_arrivals=num_compute_wgs, num_barriers=max_concurrent_steps, ) ) if not manual_consumed_barriers: # We only allocated one consumed barrier for all inputs when using # automatic consumed barriers. flat_consumed_barriers = [ consumed_barrier_type( num_arrivals=num_compute_wgs, num_barriers=max_concurrent_steps, ) ] return dict( flat_in_smem_refs=flat_in_smem_refs, flat_out_smem_refs=flat_out_smem_refs, in_smem_barrier_ref=in_smem_barrier, flat_consumed_barrier_refs=flat_consumed_barriers, ) def pipeline(*gmem_refs: AbstractRefPytree, allocations: Any | None = None): """ Run the pipeline. Args: *gmem_refs: A list of pytrees of pallas refs allocations: The allocation provided by ``pl.run_scoped`` when the result of calling ``get_allocations(*gmem_refs)`` is passed to ``pl.run_scoped``. 
""" in_gmem_refs = gmem_refs[:len(in_specs)] out_gmem_refs = gmem_refs[len(in_specs):] flat_in_gmem_refs, in_gmem_refs_treedef = jax.tree.flatten(in_gmem_refs) flat_out_gmem_refs, out_gmem_refs_treedef = jax.tree.flatten(out_gmem_refs) if in_specs_treedef != in_gmem_refs_treedef: raise ValueError( "Input specs and input gmem refs must have the same pytree structure." f" {in_specs_treedef} != {in_gmem_refs_treedef}" ) if out_specs_treedef != out_gmem_refs_treedef: raise ValueError( "Output specs and output gmem refs must have the same pytree structure." f" {out_specs_treedef} != {out_gmem_refs_treedef}" ) if allocations is None: if pipeline_state is not None: raise ValueError( "Pipeline state should not be set when using automatic allocation." ) return pl.run_scoped( functools.partial( scoped_pipeline, flat_in_gmem_refs=flat_in_gmem_refs, flat_out_gmem_refs=flat_out_gmem_refs, ), **_get_scoped_allocs(*gmem_refs), collective_axes=wg_axis, ) else: scoped_pipeline( flat_in_gmem_refs=flat_in_gmem_refs, flat_out_gmem_refs=flat_out_gmem_refs, **allocations, ) pipeline.get_allocations = _get_scoped_allocs def scoped_pipeline( *, flat_in_gmem_refs, flat_out_gmem_refs, flat_in_smem_refs, flat_out_smem_refs, in_smem_barrier_ref, flat_consumed_barrier_refs, ): flat_in_brefs: Sequence[BufferedRef] = [ BufferedRef(spec, not has_seq_axis, gmem_ref, smem_ref) for spec, has_seq_axis, gmem_ref, smem_ref in zip( flat_in_specs, in_spec_has_seq_axis, flat_in_gmem_refs, flat_in_smem_refs ) ] flat_out_brefs: Sequence[BufferedRef] = [ BufferedRef(spec, not has_seq_axis, gmem_ref, smem_ref) for spec, has_seq_axis, gmem_ref, smem_ref in zip( flat_out_specs, out_spec_has_seq_axis, flat_out_gmem_refs, flat_out_smem_refs ) ] def compute_block(): gpu_primitives.set_max_registers( _compute_registers(memory_registers, num_compute_wgs), action="increase") # This is true if any of the outputs need to be transferred inside the loop. 
smem_out_brefs = [bref for bref in flat_out_brefs if _in_smem(bref.spec)] # The implementation below has races when we have multiple compute WGs. # The problem is that we expect the compute WGs to deal with issuing the # SMEM->GMEM copies, but (1) we never predicate them, so we repeat the # same copy multiple times, and (2) we don't synchronize the compute WGs # in any way. In the unlikely event that one of the compute WGs runs 2 # steps ahead, it might start overwriting the output buffer before the # other WG has issued its copy. # # The best fix here would be to move the SMEM->GMEM copies into the memory # WG and use proper barriers (with arrival_count=2) to ensure all WGs have # produced their outputs before it is sent out to GMEM. if smem_out_brefs and num_compute_wgs > 1: raise NotImplementedError( "SMEM outputs are not supported with multiple compute warpgroups" ) copies_out_in_loop = not all(bref.is_index_invariant for bref in smem_out_brefs) needs_epilogue = any(bref.is_index_invariant for bref in smem_out_brefs) def compute_loop_body(step, carry): indices, last_store_slices, prev_body_carry = carry slot = lax.rem(step, max_concurrent_steps) consumed_slot = lax.rem(step - delay_release, max_concurrent_steps) # Wait for the current GMEM->SMEM copies to complete. gpu_primitives.barrier_wait(in_smem_barrier_ref.at[_get_slot(slot, True)]) # Wait for the previous output SMEM->GMEM copy to complete. 
if copies_out_in_loop: gpu_primitives.wait_smem_to_gmem( max_concurrent_steps - 1, wait_read_only=True ) in_brefs = jax.tree.unflatten(in_specs_treedef, flat_in_brefs) out_brefs = jax.tree.unflatten(out_specs_treedef, flat_out_brefs) all_brefs = (*in_brefs, *out_brefs) body_args = map_brefs( lambda bref: bref.get_ref_for_slot( _get_slot(slot, not bref.is_index_invariant) ), all_brefs, ) if manual_consumed_barriers: barriers = jax.tree.unflatten( in_specs_treedef, [barrier.at[consumed_slot] for barrier in flat_consumed_barrier_refs], ) body_args = (*body_args, *barriers) if has_carry: body_args = (*body_args, prev_body_carry) next_body_carry = body(indices, *body_args) if not manual_consumed_barriers: [consumed_barrier_ref] = flat_consumed_barrier_refs if delay_release > 0: lax.cond( step < delay_release, lambda: None, lambda: gpu_primitives.barrier_arrive(consumed_barrier_ref.at[consumed_slot]), ) else: gpu_primitives.barrier_arrive(consumed_barrier_ref.at[consumed_slot]) # TODO(justinfu,apaszke): This should probably be done by the memory WG. # Copy the output from SMEM to GMEM. if copies_out_in_loop: gpu_primitives.commit_smem() new_store_slices = last_store_slices[:] for idx, bref in enumerate(flat_out_brefs): if bref.is_index_invariant: assert last_store_slices[idx] is None continue assert last_store_slices[idx] is not None new_store_slices[idx] = tuple( _Slice(s.start, s.size) for s in bref.compute_gmem_slice(indices) ) are_same_slices = map( lambda old, new: old == new, last_store_slices[idx], new_store_slices[idx], ) slices_changed = ~functools.reduce(lax.bitwise_and, are_same_slices) bref.copy_out(_get_slot(slot, not bref.is_index_invariant), indices, predicate=slices_changed) gpu_primitives.commit_smem_to_gmem_group() next_indices = _inc_grid_by_1(indices, grid) return (next_indices, new_store_slices, next_body_carry) init_indices = (jnp.asarray(0, dtype=jnp.int32),) * len(grid) # TODO(justinfu): Only store base pointer instead of all indices. 
last_store_slices = [ None if bref.is_index_invariant else (_Slice(-1, -1),) * len(bref.spec.block_shape) for bref in flat_out_brefs ] if has_carry: last_indices = None def pipeline_callback(user_init_carry): nonlocal last_indices if last_indices is not None: raise ValueError( "Cannot call pipeline more than once in `compute_context`") init_loop_carry = (init_indices, last_store_slices, user_init_carry) last_indices, _, final_body_carry = lax.fori_loop(0, num_steps, compute_loop_body, init_loop_carry) return final_body_carry compute_context(pipeline_callback) if last_indices is None: raise ValueError("Pipeline was not called in `compute_context`") else: assert compute_context is None last_indices, _, _ = lax.fori_loop( 0, num_steps, compute_loop_body, (init_indices, last_store_slices, None) ) # Handle index_invariant outputs after the loop. They are not # written in the main pipeline loop. if not copies_out_in_loop and needs_epilogue: gpu_primitives.commit_smem() if needs_epilogue: last_slot = lax.rem(num_steps - 1, max_concurrent_steps) for bref in flat_out_brefs: if bref.is_index_invariant: bref.copy_out(_get_slot(last_slot, has_seq_dim=False), last_indices, predicate=None) gpu_primitives.commit_smem_to_gmem_group() if smem_out_brefs: # Finalize the pipeline. gpu_primitives.wait_smem_to_gmem(0) # The memory thread executes this block which issues all pipelined DMAs. # TODO(apaszke,justinfu): Use a single arrive_expect_tx for all transfers. 
def memory_block(): gpu_primitives.set_max_registers(memory_registers, action="decrease") indices = (jnp.asarray(0, dtype=jnp.int32),) * len(grid) if has_dynamic_grid: prologue_steps = lax.min(max_concurrent_steps, num_steps) else: assert max_concurrent_steps <= num_steps prologue_steps = max_concurrent_steps pipeline_init_prologue_steps = prologue_steps if pipeline_state is not None: if has_dynamic_grid: raise NotImplementedError( "A pipeline of pipelines is not supported with dynamic grids" ) if num_steps % max_concurrent_steps: raise NotImplementedError( "A pipeline of pipelines is only allowed when the number of steps" f" (product of grid, here {num_steps}) is divisible by" f" {max_concurrent_steps=}" ) if delay_release: raise NotImplementedError( "A pipeline of pipelines is not supported with delay_release" ) if isinstance(pipeline_state, PipelinePipeline): prologue_steps = prologue_steps if pipeline_state == PipelinePipeline.START else 0 else: prologue_steps = jnp.where(pipeline_state == PipelinePipeline.START, prologue_steps, 0) # Begin initial copies. def _init_step(step, indices): for bref in flat_in_brefs: buf_slot = _get_slot(step, not bref.is_index_invariant) barrier_slot = _get_slot(step, True) bref.copy_in(buf_slot, indices, in_smem_barrier_ref, barrier_slot) return _inc_grid_by_1(indices, grid) indices = jax.lax.fori_loop( 0, prologue_steps, _init_step, indices, unroll=not has_dynamic_grid ) def memory_loop_body(step, carry): indices, = carry slot = lax.rem(step, max_concurrent_steps) fetch_slot = slot # (x + y) % y == x % y if not manual_consumed_barriers: # We only have one consumed barrier when using automatic consumed # barrier management. 
[consumed_barrier_ref] = flat_consumed_barrier_refs gpu_primitives.barrier_wait(consumed_barrier_ref.at[slot]) consumed_barrier_it = [None] * len(flat_in_brefs) else: consumed_barrier_it = flat_consumed_barrier_refs for bref, consumed_barrier in zip(flat_in_brefs, consumed_barrier_it): if manual_consumed_barriers: gpu_primitives.barrier_wait(consumed_barrier.at[slot]) # pytype: disable=attribute-error buf_slot = _get_slot(fetch_slot, not bref.is_index_invariant) barrier_slot = _get_slot(fetch_slot, True) bref.copy_in(buf_slot, indices, in_smem_barrier_ref, barrier_slot) next_indices = _inc_grid_by_1(indices, grid) return (next_indices,) lax.fori_loop(0, num_steps - prologue_steps, memory_loop_body, (indices,)) # Await all the arrivals to not leave barriers in a bad state. # We only need to account for the prologue steps, only the first # delay_release of them skip arrivals, so we subtract them. @pl.when(pipeline_state is None or pipeline_state == PipelinePipeline.STOP) def _quiesce(): @pl.loop( num_steps - pipeline_init_prologue_steps, num_steps - delay_release, unroll=not has_dynamic_grid, ) def _epi_step(step): consumed_slot = lax.rem(step, max_concurrent_steps) for barrier in flat_consumed_barrier_refs: gpu_primitives.barrier_wait(barrier.at[consumed_slot]) wg_idx = lax.axis_index(wg_axis) lax.cond( wg_idx != memory_thread_idx, compute_block, memory_block ) # Mypy doesn't notice the .get_allocations assignment above. return pipeline # type: ignore def _compute_registers( memory_registers: int, num_compute_wgs: int, ) -> int: """Returns the max number of registers to use in compute threads. We start with the theoretical max registers per thread if one wargroup (128 threads) used the entire SM's 64k register file (64k / 128 = 512). Then reserve `memory_registers` for the producer warpgroup and distribute the remaining registers evenly among the compute warpgroups. Note: The maximum number of registers per thread is 255, so we clamp the value. 
""" n_registers = min(256, (512 - memory_registers) / num_compute_wgs) # Round down to the nearest multiple of 8. return int((n_registers // 8) * 8)
WarpSpecializedPipeline
python
huggingface__transformers
src/transformers/models/data2vec/modeling_data2vec_vision.py
{ "start": 2800, "end": 3377 }
class ____(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return f"p={self.drop_prob}" # Copied from transformers.models.beit.modeling_beit.BeitEmbeddings with Beit->Data2VecVision
Data2VecVisionDropPath
python
pandas-dev__pandas
pandas/tests/io/test_pickle.py
{ "start": 6121, "end": 11211 }
class ____: _extension_to_compression = icom.extension_to_compression def compress_file(self, src_path, dest_path, compression): if compression is None: shutil.copyfile(src_path, dest_path) return if compression == "gzip": f = gzip.open(dest_path, "w") elif compression == "bz2": f = bz2.BZ2File(dest_path, "w") elif compression == "zip": with zipfile.ZipFile(dest_path, "w", compression=zipfile.ZIP_DEFLATED) as f: f.write(src_path, os.path.basename(src_path)) elif compression == "tar": with open(src_path, "rb") as fh: with tarfile.open(dest_path, mode="w") as tar: tarinfo = tar.gettarinfo(src_path, os.path.basename(src_path)) tar.addfile(tarinfo, fh) elif compression == "xz": import lzma f = lzma.LZMAFile(dest_path, "w") elif compression == "zstd": f = import_optional_dependency("zstandard").open(dest_path, "wb") else: msg = f"Unrecognized compression type: {compression}" raise ValueError(msg) if compression not in ["zip", "tar"]: with open(src_path, "rb") as fh: with f: f.write(fh.read()) def test_write_explicit(self, compression, get_random_path, temp_file): p1 = temp_file.parent / f"{temp_file.stem}.compressed" p2 = temp_file.parent / f"{temp_file.stem}.raw" df = DataFrame( 1.1 * np.arange(120).reshape((30, 4)), columns=Index(list("ABCD"), dtype=object), index=Index([f"i-{i}" for i in range(30)], dtype=object), ) # write to compressed file df.to_pickle(p1, compression=compression) # decompress with tm.decompress_file(p1, compression=compression) as f: with open(p2, "wb") as fh: fh.write(f.read()) # read decompressed file df2 = pd.read_pickle(p2, compression=None) tm.assert_frame_equal(df, df2) @pytest.mark.parametrize("compression", ["", "None", "bad", "7z"]) def test_write_explicit_bad(self, compression, get_random_path, temp_file): df = DataFrame( 1.1 * np.arange(120).reshape((30, 4)), columns=Index(list("ABCD"), dtype=object), index=Index([f"i-{i}" for i in range(30)], dtype=object), ) path = temp_file with pytest.raises(ValueError, match="Unrecognized 
compression type"): df.to_pickle(path, compression=compression) def test_write_infer(self, compression_ext, get_random_path, temp_file): p1 = temp_file.parent / f"{temp_file.stem}{compression_ext}" p2 = temp_file.parent / f"{temp_file.stem}.raw" compression = self._extension_to_compression.get(compression_ext.lower()) df = DataFrame( 1.1 * np.arange(120).reshape((30, 4)), columns=Index(list("ABCD"), dtype=object), index=Index([f"i-{i}" for i in range(30)], dtype=object), ) # write to compressed file by inferred compression method df.to_pickle(p1) # decompress with tm.decompress_file(p1, compression=compression) as f: with open(p2, "wb") as fh: fh.write(f.read()) # read decompressed file df2 = pd.read_pickle(p2, compression=None) tm.assert_frame_equal(df, df2) def test_read_explicit(self, compression, get_random_path, temp_file): p1 = temp_file.parent / f"{temp_file.stem}.raw" p2 = temp_file.parent / f"{temp_file.stem}.compressed" df = DataFrame( 1.1 * np.arange(120).reshape((30, 4)), columns=Index(list("ABCD"), dtype=object), index=Index([f"i-{i}" for i in range(30)], dtype=object), ) # write to uncompressed file df.to_pickle(p1, compression=None) # compress self.compress_file(p1, p2, compression=compression) # read compressed file df2 = pd.read_pickle(p2, compression=compression) tm.assert_frame_equal(df, df2) def test_read_infer(self, compression_ext, get_random_path, temp_file): p1 = temp_file.parent / f"{temp_file.stem}.raw" p2 = temp_file.parent / f"{temp_file.stem}{compression_ext}" compression = self._extension_to_compression.get(compression_ext.lower()) df = DataFrame( 1.1 * np.arange(120).reshape((30, 4)), columns=Index(list("ABCD"), dtype=object), index=Index([f"i-{i}" for i in range(30)], dtype=object), ) # write to uncompressed file df.to_pickle(p1, compression=None) # compress self.compress_file(p1, p2, compression=compression) # read compressed file by inferred compression method df2 = pd.read_pickle(p2) tm.assert_frame_equal(df, df2) # 
--------------------- # test pickle compression # ---------------------
TestCompression
python
numba__numba
numba/core/typing/builtins.py
{ "start": 8382, "end": 8970 }
class ____(ConcreteTemplate): cases = list(integer_binop_cases) # Ensure that float32 ** int doesn't go through DP computations cases += [signature(types.float32, types.float32, op) for op in (types.int32, types.int64, types.uint64)] cases += [signature(types.float64, types.float64, op) for op in (types.int32, types.int64, types.uint64)] cases += [signature(op, op, op) for op in sorted(types.real_domain)] cases += [signature(op, op, op) for op in sorted(types.complex_domain)] @infer_global(pow)
BinOpPower
python
PyCQA__isort
isort/main.py
{ "start": 1633, "end": 47521 }
class ____: def __init__(self, incorrectly_sorted: bool, skipped: bool, supported_encoding: bool) -> None: self.incorrectly_sorted = incorrectly_sorted self.skipped = skipped self.supported_encoding = supported_encoding def sort_imports( file_name: str, config: Config, check: bool = False, ask_to_apply: bool = False, write_to_stdout: bool = False, **kwargs: Any, ) -> SortAttempt | None: incorrectly_sorted: bool = False skipped: bool = False try: if check: try: incorrectly_sorted = not api.check_file(file_name, config=config, **kwargs) except FileSkipped: skipped = True return SortAttempt(incorrectly_sorted, skipped, True) try: incorrectly_sorted = not api.sort_file( file_name, config=config, ask_to_apply=ask_to_apply, write_to_stdout=write_to_stdout, **kwargs, ) except FileSkipped: skipped = True return SortAttempt(incorrectly_sorted, skipped, True) except (OSError, ValueError) as error: warn(f"Unable to parse file {file_name} due to {error}", stacklevel=2) return None except UnsupportedEncoding: if config.verbose: warn(f"Encoding not supported for {file_name}", stacklevel=2) return SortAttempt(incorrectly_sorted, skipped, False) except ISortError as error: _print_hard_fail(config, message=str(error)) sys.exit(1) except Exception: _print_hard_fail(config, offending_file=file_name) raise def _print_hard_fail( config: Config, offending_file: str | None = None, message: str | None = None ) -> None: """Fail on unrecoverable exception with custom message.""" message = message or ( f"Unrecoverable exception thrown when parsing {offending_file or ''}! 
" "This should NEVER happen.\n" "If encountered, please open an issue: https://github.com/PyCQA/isort/issues/new" ) printer = create_terminal_printer( color=config.color_output, error=config.format_error, success=config.format_success ) printer.error(message) def _build_arg_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( description="Sort Python import definitions alphabetically " "within logical sections. Run with no arguments to see a quick " "start guide, otherwise, one or more files/directories/stdin must be provided. " "Use `-` as the first argument to represent stdin. Use --interactive to use the pre 5.0.0 " "interactive behavior." " " "If you've used isort 4 but are new to isort 5, see the upgrading guide: " "https://pycqa.github.io/isort/docs/upgrade_guides/5.0.0.html", add_help=False, # prevent help option from appearing in "optional arguments" group ) general_group = parser.add_argument_group("general options") target_group = parser.add_argument_group("target options") output_group = parser.add_argument_group("general output options") inline_args_group = output_group.add_mutually_exclusive_group() section_group = parser.add_argument_group("section output options") deprecated_group = parser.add_argument_group("deprecated options") general_group.add_argument( "-h", "--help", action="help", default=argparse.SUPPRESS, help=_("show this help message and exit"), ) general_group.add_argument( "-V", "--version", action="store_true", dest="show_version", help="Displays the currently installed version of isort.", ) general_group.add_argument( "--vn", "--version-number", action="version", version=__version__, help="Returns just the current version number without the logo", ) general_group.add_argument( "-v", "--verbose", action="store_true", dest="verbose", help="Shows verbose output, such as when files are skipped or when a check is successful.", ) general_group.add_argument( "--only-modified", "--om", dest="only_modified", 
action="store_true", help="Suppresses verbose output for non-modified files.", ) general_group.add_argument( "--dedup-headings", dest="dedup_headings", action="store_true", help="Tells isort to only show an identical custom import heading comment once, even if" " there are multiple sections with the comment set.", ) general_group.add_argument( "-q", "--quiet", action="store_true", dest="quiet", help="Shows extra quiet output, only errors are outputted.", ) general_group.add_argument( "-d", "--stdout", help="Force resulting output to stdout, instead of in-place.", dest="write_to_stdout", action="store_true", ) general_group.add_argument( "--overwrite-in-place", help="Tells isort to overwrite in place using the same file handle. " "Comes at a performance and memory usage penalty over its standard " "approach but ensures all file flags and modes stay unchanged.", dest="overwrite_in_place", action="store_true", ) general_group.add_argument( "--show-config", dest="show_config", action="store_true", help="See isort's determined config, as well as sources of config options.", ) general_group.add_argument( "--show-files", dest="show_files", action="store_true", help="See the files isort will be run against with the current config options.", ) general_group.add_argument( "--df", "--diff", dest="show_diff", action="store_true", help="Prints a diff of all the changes isort would make to a file, instead of " "changing it in place", ) general_group.add_argument( "-c", "--check-only", "--check", action="store_true", dest="check", help="Checks the file for unsorted / unformatted imports and prints them to the " "command line without modifying the file. 
Returns 0 when nothing would change and " "returns 1 when the file would be reformatted.", ) general_group.add_argument( "--ws", "--ignore-whitespace", action="store_true", dest="ignore_whitespace", help="Tells isort to ignore whitespace differences when --check-only is being used.", ) general_group.add_argument( "--sp", "--settings-path", "--settings-file", "--settings", dest="settings_path", help="Explicitly set the settings path or file instead of auto determining " "based on file location.", ) general_group.add_argument( "--cr", "--config-root", dest="config_root", help="Explicitly set the config root for resolving all configs. When used " "with the --resolve-all-configs flag, isort will look at all sub-folders " "in this config root to resolve config files and sort files based on the " "closest available config(if any)", ) general_group.add_argument( "--resolve-all-configs", dest="resolve_all_configs", action="store_true", help="Tells isort to resolve the configs for all sub-directories " "and sort files in terms of its closest config files.", ) general_group.add_argument( "--profile", dest="profile", type=str, help="Base profile type to use for configuration. " f"Profiles include: {', '.join(profiles.keys())}. As well as any shared profiles.", ) general_group.add_argument( "-j", "--jobs", help="Number of files to process in parallel. 
Negative value means use number of CPUs.", dest="jobs", type=int, nargs="?", const=-1, ) general_group.add_argument( "--ac", "--atomic", dest="atomic", action="store_true", help="Ensures the output doesn't save if the resulting file contains syntax errors.", ) general_group.add_argument( "--interactive", dest="ask_to_apply", action="store_true", help="Tells isort to apply changes interactively.", ) general_group.add_argument( "--format-error", dest="format_error", help="Override the format used to print errors.", ) general_group.add_argument( "--format-success", dest="format_success", help="Override the format used to print success.", ) general_group.add_argument( "--srx", "--sort-reexports", dest="sort_reexports", action="store_true", help="Automatically sort all re-exports (module level __all__ collections)", ) target_group.add_argument( "files", nargs="*", help="One or more Python source files that need their imports sorted." ) target_group.add_argument( "--filter-files", dest="filter_files", action="store_true", help="Tells isort to filter files even when they are explicitly passed in as " "part of the CLI command.", ) target_group.add_argument( "-s", "--skip", help="Files that isort should skip over. If you want to skip multiple " "files you should specify twice: --skip file1 --skip file2. Values can be " "file names, directory names or file paths. To skip all files in a nested path " "use --skip-glob.", dest="skip", action="append", ) target_group.add_argument( "--extend-skip", help="Extends --skip to add additional files that isort should skip over. " "If you want to skip multiple " "files you should specify twice: --skip file1 --skip file2. Values can be " "file names, directory names or file paths. 
To skip all files in a nested path " "use --skip-glob.", dest="extend_skip", action="append", ) target_group.add_argument( "--sg", "--skip-glob", help="Files that isort should skip over.", dest="skip_glob", action="append", ) target_group.add_argument( "--extend-skip-glob", help="Additional files that isort should skip over (extending --skip-glob).", dest="extend_skip_glob", action="append", ) target_group.add_argument( "--gitignore", "--skip-gitignore", action="store_true", dest="skip_gitignore", help="Treat project as a git repository and ignore files listed in .gitignore." "\nNOTE: This requires git to be installed and accessible from the same shell as isort.", ) target_group.add_argument( "--ext", "--extension", "--supported-extension", dest="supported_extensions", action="append", help="Specifies what extensions isort can be run against.", ) target_group.add_argument( "--blocked-extension", dest="blocked_extensions", action="append", help="Specifies what extensions isort can never be run against.", ) target_group.add_argument( "--dont-follow-links", dest="dont_follow_links", action="store_true", help="Tells isort not to follow symlinks that are encountered when running recursively.", ) target_group.add_argument( "--filename", dest="filename", help="Provide the filename associated with a stream.", ) target_group.add_argument( "--allow-root", action="store_true", default=False, help="Tells isort not to treat / specially, allowing it to be run against the root dir.", ) output_group.add_argument( "-a", "--add-import", dest="add_imports", action="append", help="Adds the specified import line to all files, " "automatically determining correct placement.", ) output_group.add_argument( "--append", "--append-only", dest="append_only", action="store_true", help="Only adds the imports specified in --add-import if the file" " contains existing imports.", ) output_group.add_argument( "--af", "--force-adds", dest="force_adds", action="store_true", help="Forces import adds 
even if the original file is empty.", ) output_group.add_argument( "--rm", "--remove-import", dest="remove_imports", action="append", help="Removes the specified import from all files.", ) output_group.add_argument( "--float-to-top", dest="float_to_top", action="store_true", help="Causes all non-indented imports to float to the top of the file having its imports " "sorted (immediately below the top of file comment).\n" "This can be an excellent shortcut for collecting imports every once in a while " "when you place them in the middle of a file to avoid context switching.\n\n" "*NOTE*: It currently doesn't work with cimports and introduces some extra over-head " "and a performance penalty.", ) output_group.add_argument( "--dont-float-to-top", dest="dont_float_to_top", action="store_true", help="Forces --float-to-top setting off. See --float-to-top for more information.", ) output_group.add_argument( "--ca", "--combine-as", dest="combine_as_imports", action="store_true", help="Combines as imports on the same line.", ) output_group.add_argument( "--cs", "--combine-star", dest="combine_star", action="store_true", help="Ensures that if a star import is present, " "nothing else is imported from that namespace.", ) output_group.add_argument( "-e", "--balanced", dest="balanced_wrapping", action="store_true", help="Balances wrapping to produce the most consistent line length possible", ) output_group.add_argument( "--ff", "--from-first", dest="from_first", action="store_true", help="Switches the typical ordering preference, " "showing from imports first then straight ones.", ) output_group.add_argument( "--fgw", "--force-grid-wrap", nargs="?", const=2, type=int, dest="force_grid_wrap", help="Force number of from imports (defaults to 2 when passed as CLI flag without value) " "to be grid wrapped regardless of line " "length. 
If 0 is passed in (the global default) only line length is considered.", ) output_group.add_argument( "-i", "--indent", help='String to place for indents defaults to " " (4 spaces).', dest="indent", type=str, ) output_group.add_argument( "--lbi", "--lines-before-imports", dest="lines_before_imports", type=int ) output_group.add_argument( "--lai", "--lines-after-imports", dest="lines_after_imports", type=int ) output_group.add_argument( "--lbt", "--lines-between-types", dest="lines_between_types", type=int ) output_group.add_argument( "--le", "--line-ending", dest="line_ending", help="Forces line endings to the specified value. " "If not set, values will be guessed per-file.", ) output_group.add_argument( "--ls", "--length-sort", help="Sort imports by their string length.", dest="length_sort", action="store_true", ) output_group.add_argument( "--lss", "--length-sort-straight", help="Sort straight imports by their string length. Similar to `length_sort` " "but applies only to straight imports and doesn't affect from imports.", dest="length_sort_straight", action="store_true", ) output_group.add_argument( "-m", "--multi-line", dest="multi_line_output", choices=list(WrapModes.__members__.keys()) + [str(mode.value) for mode in WrapModes.__members__.values()], type=str, help="Multi line output (0-grid, 1-vertical, 2-hanging, 3-vert-hanging, 4-vert-grid, " "5-vert-grid-grouped, 6-deprecated-alias-for-5, 7-noqa, " "8-vertical-hanging-indent-bracket, 9-vertical-prefix-from-module-import, " "10-hanging-indent-with-parentheses).", ) output_group.add_argument( "-n", "--ensure-newline-before-comments", dest="ensure_newline_before_comments", action="store_true", help="Inserts a blank line before a comment following an import.", ) inline_args_group.add_argument( "--nis", "--no-inline-sort", dest="no_inline_sort", action="store_true", help="Leaves `from` imports with multiple imports 'as-is' " "(e.g. 
`from foo import a, c ,b`).", ) output_group.add_argument( "--ot", "--order-by-type", dest="order_by_type", action="store_true", help="Order imports by type, which is determined by case, in addition to alphabetically.\n" "\n**NOTE**: type here refers to the implied type from the import name capitalization.\n" ' isort does not do type introspection for the imports. These "types" are simply: ' "CONSTANT_VARIABLE, CamelCaseClass, variable_or_function. If your project follows PEP8" " or a related coding standard and has many imports this is a good default, otherwise you " "likely will want to turn it off. From the CLI the `--dont-order-by-type` option will turn " "this off.", ) output_group.add_argument( "--dt", "--dont-order-by-type", dest="dont_order_by_type", action="store_true", help="Don't order imports by type, which is determined by case, in addition to " "alphabetically.\n\n" "**NOTE**: type here refers to the implied type from the import name capitalization.\n" ' isort does not do type introspection for the imports. These "types" are simply: ' "CONSTANT_VARIABLE, CamelCaseClass, variable_or_function. If your project follows PEP8" " or a related coding standard and has many imports this is a good default. You can turn " "this on from the CLI using `--order-by-type`.", ) output_group.add_argument( "--rr", "--reverse-relative", dest="reverse_relative", action="store_true", help="Reverse order of relative imports.", ) output_group.add_argument( "--reverse-sort", dest="reverse_sort", action="store_true", help="Reverses the ordering of imports.", ) output_group.add_argument( "--sort-order", dest="sort_order", help="Specify sorting function. 
Can be built in (natural[default] = force numbers " "to be sequential, native = Python's built-in sorted function) or an installable plugin.", ) inline_args_group.add_argument( "--sl", "--force-single-line-imports", dest="force_single_line", action="store_true", help="Forces all from imports to appear on their own line", ) output_group.add_argument( "--nsl", "--single-line-exclusions", help="One or more modules to exclude from the single line rule.", dest="single_line_exclusions", action="append", ) output_group.add_argument( "--tc", "--trailing-comma", dest="include_trailing_comma", action="store_true", help="Includes a trailing comma on multi line imports that include parentheses.", ) output_group.add_argument( "--up", "--use-parentheses", dest="use_parentheses", action="store_true", help="Use parentheses for line continuation on length limit instead of slashes." " **NOTE**: This is separate from wrap modes, and only affects how individual lines that " " are too long get continued, not sections of multiple imports.", ) output_group.add_argument( "-l", "-w", "--line-length", "--line-width", help="The max length of an import line (used for wrapping long imports).", dest="line_length", type=int, ) output_group.add_argument( "--wl", "--wrap-length", dest="wrap_length", type=int, help="Specifies how long lines that are wrapped should be, if not set line_length is used." "\nNOTE: wrap_length must be LOWER than or equal to line_length.", ) output_group.add_argument( "--case-sensitive", dest="case_sensitive", action="store_true", help="Tells isort to include casing when sorting module names", ) output_group.add_argument( "--remove-redundant-aliases", dest="remove_redundant_aliases", action="store_true", help=( "Tells isort to remove redundant aliases from imports, such as `import os as os`." " This defaults to `False` simply because some projects use these seemingly useless " " aliases to signify intent and change behaviour." 
), ) output_group.add_argument( "--honor-noqa", dest="honor_noqa", action="store_true", help="Tells isort to honor noqa comments to enforce skipping those comments.", ) output_group.add_argument( "--treat-comment-as-code", dest="treat_comments_as_code", action="append", help="Tells isort to treat the specified single line comment(s) as if they are code.", ) output_group.add_argument( "--treat-all-comment-as-code", dest="treat_all_comments_as_code", action="store_true", help="Tells isort to treat all single line comments as if they are code.", ) output_group.add_argument( "--formatter", dest="formatter", type=str, help="Specifies the name of a formatting plugin to use when producing output.", ) output_group.add_argument( "--color", dest="color_output", action="store_true", help="Tells isort to use color in terminal output.", ) output_group.add_argument( "--ext-format", dest="ext_format", help="Tells isort to format the given files according to an extensions formatting rules.", ) output_group.add_argument( "--star-first", help="Forces star imports above others to avoid overriding directly imported variables.", dest="star_first", action="store_true", ) output_group.add_argument( "--split-on-trailing-comma", help="Split imports list followed by a trailing comma into VERTICAL_HANGING_INDENT mode", dest="split_on_trailing_comma", action="store_true", ) section_group.add_argument( "--sd", "--section-default", dest="default_section", help="Sets the default section for import options: " + str(sections.DEFAULT), ) section_group.add_argument( "--only-sections", "--os", dest="only_sections", action="store_true", help="Causes imports to be sorted based on their sections like STDLIB, THIRDPARTY, etc. 
" "Within sections, the imports are ordered by their import style and the imports with " "the same style maintain their relative positions.", ) section_group.add_argument( "--ds", "--no-sections", help="Put all imports into the same section bucket", dest="no_sections", action="store_true", ) section_group.add_argument( "--fas", "--force-alphabetical-sort", action="store_true", dest="force_alphabetical_sort", help="Force all imports to be sorted as a single section", ) section_group.add_argument( "--fss", "--force-sort-within-sections", action="store_true", dest="force_sort_within_sections", help="Don't sort straight-style imports (like import sys) before from-style imports " "(like from itertools import groupby). Instead, sort the imports by module, " "independent of import style.", ) section_group.add_argument( "--hcss", "--honor-case-in-force-sorted-sections", action="store_true", dest="honor_case_in_force_sorted_sections", help="Honor `--case-sensitive` when `--force-sort-within-sections` is being used. " "Without this option set, `--order-by-type` decides module name ordering too.", ) section_group.add_argument( "--srss", "--sort-relative-in-force-sorted-sections", action="store_true", dest="sort_relative_in_force_sorted_sections", help="When using `--force-sort-within-sections`, sort relative imports the same " "way as they are sorted when not using that setting.", ) section_group.add_argument( "--fass", "--force-alphabetical-sort-within-sections", action="store_true", dest="force_alphabetical_sort_within_sections", help="Force all imports to be sorted alphabetically within a section", ) section_group.add_argument( "-t", "--top", help="Force specific imports to the top of their appropriate section.", dest="force_to_top", action="append", ) section_group.add_argument( "--combine-straight-imports", "--csi", dest="combine_straight_imports", action="store_true", help="Combines all the bare straight imports of the same section in a single line. 
" "Won't work with sections which have 'as' imports", ) section_group.add_argument( "--nlb", "--no-lines-before", help="Sections which should not be split with previous by empty lines", dest="no_lines_before", action="append", ) section_group.add_argument( "--src", "--src-path", dest="src_paths", action="append", help="Add an explicitly defined source path " "(modules within src paths have their imports automatically categorized as first_party)." " Glob expansion (`*` and `**`) is supported for this option.", ) section_group.add_argument( "-b", "--builtin", dest="known_standard_library", action="append", help="Force isort to recognize a module as part of Python's standard library.", ) section_group.add_argument( "--extra-builtin", dest="extra_standard_library", action="append", help="Extra modules to be included in the list of ones in Python's standard library.", ) section_group.add_argument( "-f", "--future", dest="known_future_library", action="append", help="Force isort to recognize a module as part of Python's internal future compatibility " "libraries. WARNING: this overrides the behavior of __future__ handling and therefore" " can result in code that can't execute. If you're looking to add dependencies such " "as six, a better option is to create another section below --future using custom " "sections. See: https://github.com/PyCQA/isort#custom-sections-and-ordering and the " "discussion here: https://github.com/PyCQA/isort/issues/1463.", ) section_group.add_argument( "-o", "--thirdparty", dest="known_third_party", action="append", help="Force isort to recognize a module as being part of a third party library.", ) section_group.add_argument( "-p", "--project", dest="known_first_party", action="append", help="Force isort to recognize a module as being part of the current python project.", ) section_group.add_argument( "--known-local-folder", dest="known_local_folder", action="append", help="Force isort to recognize a module as being a local folder. 
" "Generally, this is reserved for relative imports (from . import module).", ) section_group.add_argument( "--virtual-env", dest="virtual_env", help="Virtual environment to use for determining whether a package is third-party", ) section_group.add_argument( "--conda-env", dest="conda_env", help="Conda environment to use for determining whether a package is third-party", ) section_group.add_argument( "--py", "--python-version", action="store", dest="py_version", choices=(*tuple(VALID_PY_TARGETS), "auto"), help="Tells isort to set the known standard library based on the specified Python " "version. Default is to assume any Python 3 version could be the target, and use a union " "of all stdlib modules across versions. If auto is specified, the version of the " "interpreter used to run isort " f"(currently: {sys.version_info.major}{sys.version_info.minor}) will be used.", ) # deprecated options deprecated_group.add_argument( "--recursive", dest="deprecated_flags", action="append_const", const="--recursive", help=argparse.SUPPRESS, ) deprecated_group.add_argument( "-rc", dest="deprecated_flags", action="append_const", const="-rc", help=argparse.SUPPRESS ) deprecated_group.add_argument( "--dont-skip", dest="deprecated_flags", action="append_const", const="--dont-skip", help=argparse.SUPPRESS, ) deprecated_group.add_argument( "-ns", dest="deprecated_flags", action="append_const", const="-ns", help=argparse.SUPPRESS ) deprecated_group.add_argument( "--apply", dest="deprecated_flags", action="append_const", const="--apply", help=argparse.SUPPRESS, ) deprecated_group.add_argument( "-k", "--keep-direct-and-as", dest="deprecated_flags", action="append_const", const="--keep-direct-and-as", help=argparse.SUPPRESS, ) return parser def parse_args(argv: Sequence[str] | None = None) -> dict[str, Any]: argv = sys.argv[1:] if argv is None else list(argv) remapped_deprecated_args = [] for index, arg in enumerate(argv): if arg in DEPRECATED_SINGLE_DASH_ARGS: 
remapped_deprecated_args.append(arg) argv[index] = f"-{arg}" parser = _build_arg_parser() arguments = {key: value for key, value in vars(parser.parse_args(argv)).items() if value} if remapped_deprecated_args: arguments["remapped_deprecated_args"] = remapped_deprecated_args if "dont_order_by_type" in arguments: arguments["order_by_type"] = False del arguments["dont_order_by_type"] if "dont_follow_links" in arguments: arguments["follow_links"] = False del arguments["dont_follow_links"] if "dont_float_to_top" in arguments: del arguments["dont_float_to_top"] if arguments.get("float_to_top", False): sys.exit("Can't set both --float-to-top and --dont-float-to-top.") else: arguments["float_to_top"] = False multi_line_output = arguments.get("multi_line_output", None) if multi_line_output: if multi_line_output.isdigit(): arguments["multi_line_output"] = WrapModes(int(multi_line_output)) else: arguments["multi_line_output"] = WrapModes[multi_line_output] return arguments def _preconvert(item: Any) -> str | list[Any]: """Preconverts objects from native types into JSONifyiable types""" if isinstance(item, (set, frozenset)): return list(item) if isinstance(item, WrapModes): return str(item.name) if isinstance(item, Path): return str(item) if callable(item) and hasattr(item, "__name__"): return str(item.__name__) raise TypeError(f"Unserializable object {item} of type {type(item)}") def identify_imports_main( argv: Sequence[str] | None = None, stdin: TextIOWrapper | None = None ) -> None: parser = argparse.ArgumentParser( description="Get all import definitions from a given file." "Use `-` as the first argument to represent stdin." ) parser.add_argument( "files", nargs="+", help="One or more Python source files that need their imports sorted." 
) parser.add_argument( "--top-only", action="store_true", default=False, help="Only identify imports that occur in before functions or classes.", ) target_group = parser.add_argument_group("target options") target_group.add_argument( "--follow-links", action="store_true", default=False, help="Tells isort to follow symlinks that are encountered when running recursively.", ) uniqueness = parser.add_mutually_exclusive_group() uniqueness.add_argument( "--unique", action="store_true", default=False, help="If true, isort will only identify unique imports.", ) uniqueness.add_argument( "--packages", dest="unique", action="store_const", const=api.ImportKey.PACKAGE, default=False, help="If true, isort will only identify the unique top level modules imported.", ) uniqueness.add_argument( "--modules", dest="unique", action="store_const", const=api.ImportKey.MODULE, default=False, help="If true, isort will only identify the unique modules imported.", ) uniqueness.add_argument( "--attributes", dest="unique", action="store_const", const=api.ImportKey.ATTRIBUTE, default=False, help="If true, isort will only identify the unique attributes imported.", ) arguments = parser.parse_args(argv) file_names = arguments.files if file_names == ["-"]: identified_imports = api.find_imports_in_stream( sys.stdin if stdin is None else stdin, unique=arguments.unique, top_only=arguments.top_only, follow_links=arguments.follow_links, ) else: identified_imports = api.find_imports_in_paths( file_names, unique=arguments.unique, top_only=arguments.top_only, follow_links=arguments.follow_links, ) for identified_import in identified_imports: if arguments.unique == api.ImportKey.PACKAGE: print(identified_import.module.split(".")[0]) elif arguments.unique == api.ImportKey.MODULE: print(identified_import.module) elif arguments.unique == api.ImportKey.ATTRIBUTE: print(f"{identified_import.module}.{identified_import.attribute}") else: print(str(identified_import)) # Ignore DeepSource cyclomatic complexity check 
# ... for this function. It is one the main entrypoints so sort of expected to be complex.
# skipcq: PY-R1000
def main(argv: Sequence[str] | None = None, stdin: TextIOWrapper | None = None) -> None:
    """Command-line entry point: parse arguments and sort/check imports.

    Handles three top-level modes: printing the resolved config
    (``--show-config``), sorting a single stream read from stdin
    (``files == ["-"]``), and sorting/checking a set of files on disk
    (optionally in parallel via a multiprocessing pool). Exits with status 1
    when a check fails, when every attempted path was broken, or when no file
    had a supported encoding.
    """
    arguments = parse_args(argv)
    if arguments.get("show_version"):
        print(ASCII_ART)
        return

    show_config: bool = arguments.pop("show_config", False)
    show_files: bool = arguments.pop("show_files", False)
    if show_config and show_files:
        sys.exit("Error: either specify show-config or show-files not both.")

    # A settings_path pointing at a file is treated as an explicit settings
    # file; its containing directory becomes the effective settings_path.
    if "settings_path" in arguments:
        if os.path.isfile(arguments["settings_path"]):
            arguments["settings_file"] = os.path.abspath(arguments["settings_path"])
            arguments["settings_path"] = os.path.dirname(arguments["settings_file"])
        else:
            arguments["settings_path"] = os.path.abspath(arguments["settings_path"])

    if "virtual_env" in arguments:
        venv = arguments["virtual_env"]
        arguments["virtual_env"] = os.path.abspath(venv)
        if not os.path.isdir(arguments["virtual_env"]):
            warn(f"virtual_env dir does not exist: {arguments['virtual_env']}", stacklevel=2)

    file_names = arguments.pop("files", [])
    if not file_names and not show_config:
        # No paths given: show the quick guide, but treat stray flags
        # without any target as a usage error.
        print(QUICK_GUIDE)
        if arguments:
            sys.exit("Error: arguments passed in without any paths or content.")
        return
    if "settings_path" not in arguments:
        # Derive settings_path from the input: the stream filename/cwd for
        # stdin mode, otherwise the first file argument.
        arguments["settings_path"] = (
            arguments.get("filename", None) or os.getcwd()
            if file_names == ["-"]
            else os.path.abspath(file_names[0] if file_names else ".")
        )
        if not os.path.isdir(arguments["settings_path"]):
            arguments["settings_path"] = os.path.dirname(arguments["settings_path"])

    # Pop run-control options out of the copy so only Config() kwargs remain.
    config_dict = arguments.copy()
    ask_to_apply = config_dict.pop("ask_to_apply", False)
    jobs = config_dict.pop("jobs", None)
    check = config_dict.pop("check", False)
    show_diff = config_dict.pop("show_diff", False)
    write_to_stdout = config_dict.pop("write_to_stdout", False)
    deprecated_flags = config_dict.pop("deprecated_flags", False)
    remapped_deprecated_args = config_dict.pop("remapped_deprecated_args", False)
    stream_filename = config_dict.pop("filename", None)
    ext_format = config_dict.pop("ext_format", None)
    allow_root = config_dict.pop("allow_root", None)
    resolve_all_configs = config_dict.pop("resolve_all_configs", False)
    wrong_sorted_files = False
    all_attempt_broken = False
    no_valid_encodings = False

    config_trie: Trie | None = None
    if resolve_all_configs:
        config_trie = find_all_configs(config_dict.pop("config_root", "."))

    if "src_paths" in config_dict:
        config_dict["src_paths"] = {
            Path(src_path).resolve() for src_path in config_dict.get("src_paths", ())
        }

    config = Config(**config_dict)
    if show_config:
        print(json.dumps(config.__dict__, indent=4, separators=(",", ": "), default=_preconvert))
        return
    if file_names == ["-"]:
        # Streaming mode: read from stdin (or the injected `stdin` for tests).
        file_path = Path(stream_filename) if stream_filename else None
        if show_files:
            sys.exit("Error: can't show files for streaming input.")
        input_stream = sys.stdin if stdin is None else stdin
        if check:
            incorrectly_sorted = not api.check_stream(
                input_stream=input_stream,
                config=config,
                show_diff=show_diff,
                file_path=file_path,
                extension=ext_format,
            )
            wrong_sorted_files = incorrectly_sorted
        else:
            try:
                api.sort_stream(
                    input_stream=input_stream,
                    output_stream=sys.stdout,
                    config=config,
                    show_diff=show_diff,
                    file_path=file_path,
                    extension=ext_format,
                    raise_on_skip=False,
                )
            except FileSkipped:
                # A skipped stream is passed through unchanged.
                sys.stdout.write(input_stream.read())
    elif "/" in file_names and not allow_root:
        # Refuse to recurse from the filesystem root unless explicitly allowed.
        printer = create_terminal_printer(
            color=config.color_output, error=config.format_error, success=config.format_success
        )
        printer.error("it is dangerous to operate recursively on '/'")
        printer.error("use --allow-root to override this failsafe")
        sys.exit(1)
    else:
        if stream_filename:
            printer = create_terminal_printer(
                color=config.color_output, error=config.format_error, success=config.format_success
            )
            printer.error("Filename override is intended only for stream (-) sorting.")
            sys.exit(1)
        skipped: list[str] = []
        broken: list[str] = []

        if config.filter_files:
            # Pre-filter explicitly listed files against the skip settings.
            filtered_files = []
            for file_name in file_names:
                if config.is_skipped(Path(file_name)):
                    skipped.append(str(Path(file_name).resolve()))
                else:
                    filtered_files.append(file_name)
            file_names = filtered_files

        # Expand directories; `skipped`/`broken` are appended to in place.
        file_names = files.find(file_names, config, skipped, broken)
        if show_files:
            for file_name in file_names:
                print(file_name)
            return
        num_skipped = 0
        num_broken = 0
        num_invalid_encoding = 0
        if config.verbose:
            print(ASCII_ART)

        if jobs:
            # Imported lazily so single-process runs avoid the import cost.
            import multiprocessing.pool  # noqa: PLC0415

            # jobs <= 0 means "use all available CPUs".
            executor_ctx: multiprocessing.pool.Pool | AbstractContextManager[None] = (
                multiprocessing.pool.Pool(jobs if jobs > 0 else multiprocessing.cpu_count())
            )
        else:
            executor_ctx = nullcontext()

        with executor_ctx as executor:
            if executor is not None:
                attempt_iterator = executor.imap(
                    functools.partial(
                        sort_imports,
                        config=config,
                        check=check,
                        ask_to_apply=ask_to_apply,
                        show_diff=show_diff,
                        write_to_stdout=write_to_stdout,
                        extension=ext_format,
                        config_trie=config_trie,
                    ),
                    file_names,
                )
            else:
                # https://github.com/python/typeshed/pull/2814
                attempt_iterator = (
                    sort_imports(  # type: ignore
                        file_name,
                        config=config,
                        check=check,
                        ask_to_apply=ask_to_apply,
                        show_diff=show_diff,
                        write_to_stdout=write_to_stdout,
                        extension=ext_format,
                        config_trie=config_trie,
                    )
                    for file_name in file_names
                )

            # If any files passed in are missing considered as error, should be removed
            is_no_attempt = True
            any_encoding_valid = False
            for sort_attempt in attempt_iterator:
                if not sort_attempt:
                    continue  # pragma: no cover - shouldn't happen, satisfies type constraint
                incorrectly_sorted = sort_attempt.incorrectly_sorted
                if arguments.get("check", False) and incorrectly_sorted:
                    wrong_sorted_files = True
                if sort_attempt.skipped:
                    num_skipped += (
                        1  # pragma: no cover - shouldn't happen, due to skip in iter_source_code
                    )

                if not sort_attempt.supported_encoding:
                    num_invalid_encoding += 1
                else:
                    any_encoding_valid = True

                is_no_attempt = False

        num_skipped += len(skipped)
        if num_skipped and not config.quiet:
            if config.verbose:
                for was_skipped in skipped:
                    print(
                        f"{was_skipped} was skipped as it's listed in 'skip' setting, "
                        "matches a glob in 'skip_glob' setting, or is in a .gitignore file with "
                        "--skip-gitignore enabled."
                    )
            print(f"Skipped {num_skipped} files")

        num_broken += len(broken)
        if num_broken and not config.quiet:
            if config.verbose:
                for was_broken in broken:
                    warn(
                        f"{was_broken} was broken path, make sure it exists correctly", stacklevel=2
                    )
            print(f"Broken {num_broken} paths")

        # Failure only when *every* attempt was a broken path / bad encoding.
        if num_broken > 0 and is_no_attempt:
            all_attempt_broken = True
        if num_invalid_encoding > 0 and not any_encoding_valid:
            no_valid_encodings = True

    if not config.quiet and (remapped_deprecated_args or deprecated_flags):
        if remapped_deprecated_args:
            warn(
                "W0502: The following deprecated single dash CLI flags were used and translated: "
                f"{', '.join(remapped_deprecated_args)}!",
                stacklevel=2,
            )
        if deprecated_flags:
            warn(
                "W0501: The following deprecated CLI flags were used and ignored: "
                f"{', '.join(deprecated_flags)}!",
                stacklevel=2,
            )
        warn(
            "W0500: Please see the 5.0.0 Upgrade guide: "
            "https://pycqa.github.io/isort/docs/upgrade_guides/5.0.0.html",
            stacklevel=2,
        )

    if wrong_sorted_files:
        sys.exit(1)

    if all_attempt_broken:
        sys.exit(1)

    if no_valid_encodings:
        printer = create_terminal_printer(
            color=config.color_output, error=config.format_error, success=config.format_success
        )
        printer.error("No valid encodings.")
        sys.exit(1)


if __name__ == "__main__":
    main()
SortAttempt
python
python-excel__xlrd
xlrd/book.py
{ "start": 9265, "end": 57527 }
class ____(BaseObject): """ Contents of a "workbook". .. warning:: You should not instantiate this class yourself. You use the :class:`Book` object that was returned when you called :func:`~xlrd.open_workbook`. """ #: The number of worksheets present in the workbook file. #: This information is available even when no sheets have yet been loaded. nsheets = 0 #: Which date system was in force when this file was last saved. #: #: 0: #: 1900 system (the Excel for Windows default). #: #: 1: #: 1904 system (the Excel for Macintosh default). #: #: Defaults to 0 in case it's not specified in the file. datemode = 0 #: Version of BIFF (Binary Interchange File Format) used to create the file. #: Latest is 8.0 (represented here as 80), introduced with Excel 97. #: Earliest supported by this module: 2.0 (represented as 20). biff_version = 0 #: List containing a :class:`Name` object for each ``NAME`` record in the #: workbook. #: #: .. versionadded:: 0.6.0 name_obj_list = [] #: An integer denoting the character set used for strings in this file. #: For BIFF 8 and later, this will be 1200, meaning Unicode; #: more precisely, UTF_16_LE. #: For earlier versions, this is used to derive the appropriate Python #: encoding to be used to convert to Unicode. #: Examples: ``1252 -> 'cp1252'``, ``10000 -> 'mac_roman'`` codepage = None #: The encoding that was derived from the codepage. encoding = None #: A tuple containing the telephone country code for: #: #: ``[0]``: #: the user-interface setting when the file was created. #: #: ``[1]``: #: the regional settings. #: #: Example: ``(1, 61)`` meaning ``(USA, Australia)``. #: #: This information may give a clue to the correct encoding for an #: unknown codepage. For a long list of observed values, refer to the #: OpenOffice.org documentation for the ``COUNTRY`` record. countries = (0, 0) #: What (if anything) is recorded as the name of the last user to #: save the file. 
user_name = UNICODE_LITERAL('') #: A list of :class:`~xlrd.formatting.Font` class instances, #: each corresponding to a FONT record. #: #: .. versionadded:: 0.6.1 font_list = [] #: A list of :class:`~xlrd.formatting.XF` class instances, #: each corresponding to an ``XF`` record. #: #: .. versionadded:: 0.6.1 xf_list = [] #: A list of :class:`~xlrd.formatting.Format` objects, each corresponding to #: a ``FORMAT`` record, in the order that they appear in the input file. #: It does *not* contain builtin formats. #: #: If you are creating an output file using (for example) :mod:`xlwt`, #: use this list. #: #: The collection to be used for all visual rendering purposes is #: :attr:`format_map`. #: #: .. versionadded:: 0.6.1 format_list = [] ## #: The mapping from :attr:`~xlrd.formatting.XF.format_key` to #: :class:`~xlrd.formatting.Format` object. #: #: .. versionadded:: 0.6.1 format_map = {} #: This provides access via name to the extended format information for #: both built-in styles and user-defined styles. #: #: It maps ``name`` to ``(built_in, xf_index)``, where #: ``name`` is either the name of a user-defined style, #: or the name of one of the built-in styles. Known built-in names are #: Normal, RowLevel_1 to RowLevel_7, #: ColLevel_1 to ColLevel_7, Comma, Currency, Percent, "Comma [0]", #: "Currency [0]", Hyperlink, and "Followed Hyperlink". #: #: ``built_in`` has the following meanings #: #: 1: #: built-in style #: #: 0: #: user-defined #: #: ``xf_index`` is an index into :attr:`Book.xf_list`. #: #: References: OOo docs s6.99 (``STYLE`` record); Excel UI Format/Style #: #: .. versionadded:: 0.6.1 #: #: Extracted only if ``open_workbook(..., formatting_info=True)`` #: #: .. versionadded:: 0.7.4 style_name_map = {} #: This provides definitions for colour indexes. Please refer to #: :ref:`palette` for an explanation #: of how colours are represented in Excel. #: #: Colour indexes into the palette map into ``(red, green, blue)`` tuples. #: "Magic" indexes e.g. 
``0x7FFF`` map to ``None``. #: #: :attr:`colour_map` is what you need if you want to render cells on screen #: or in a PDF file. If you are writing an output XLS file, use #: :attr:`palette_record`. #: #: .. note:: Extracted only if ``open_workbook(..., formatting_info=True)`` #: #: .. versionadded:: 0.6.1 colour_map = {} #: If the user has changed any of the colours in the standard palette, the #: XLS file will contain a ``PALETTE`` record with 56 (16 for Excel 4.0 and #: earlier) RGB values in it, and this list will be e.g. #: ``[(r0, b0, g0), ..., (r55, b55, g55)]``. #: Otherwise this list will be empty. This is what you need if you are #: writing an output XLS file. If you want to render cells on screen or in a #: PDF file, use :attr:`colour_map`. #: #: .. note:: Extracted only if ``open_workbook(..., formatting_info=True)`` #: #: .. versionadded:: 0.6.1 palette_record = [] #: Time in seconds to extract the XLS image as a contiguous string #: (or mmap equivalent). load_time_stage_1 = -1.0 #: Time in seconds to parse the data from the contiguous string #: (or mmap equivalent). load_time_stage_2 = -1.0 def sheets(self): """ :returns: A list of all sheets in the book. All sheets not already loaded will be loaded. """ for sheetx in xrange(self.nsheets): if not self._sheet_list[sheetx]: self.get_sheet(sheetx) return self._sheet_list[:] def sheet_by_index(self, sheetx): """ :param sheetx: Sheet index in ``range(nsheets)`` :returns: A :class:`~xlrd.sheet.Sheet`. """ return self._sheet_list[sheetx] or self.get_sheet(sheetx) def __iter__(self): """ Makes iteration through sheets of a book a little more straightforward. Don't free resources after use since it can be called like `list(book)` """ for i in range(self.nsheets): yield self.sheet_by_index(i) def sheet_by_name(self, sheet_name): """ :param sheet_name: Name of the sheet required. :returns: A :class:`~xlrd.sheet.Sheet`. 
""" try: sheetx = self._sheet_names.index(sheet_name) except ValueError: raise XLRDError('No sheet named <%r>' % sheet_name) return self.sheet_by_index(sheetx) def __getitem__(self, item): """ Allow indexing with sheet name or index. :param item: Name or index of sheet enquired upon :return: :class:`~xlrd.sheet.Sheet`. """ if isinstance(item, int): return self.sheet_by_index(item) else: return self.sheet_by_name(item) def sheet_names(self): """ :returns: A list of the names of all the worksheets in the workbook file. This information is available even when no sheets have yet been loaded. """ return self._sheet_names[:] def sheet_loaded(self, sheet_name_or_index): """ :param sheet_name_or_index: Name or index of sheet enquired upon :returns: ``True`` if sheet is loaded, ``False`` otherwise. .. versionadded:: 0.7.1 """ if isinstance(sheet_name_or_index, int): sheetx = sheet_name_or_index else: try: sheetx = self._sheet_names.index(sheet_name_or_index) except ValueError: raise XLRDError('No sheet named <%r>' % sheet_name_or_index) return bool(self._sheet_list[sheetx]) def unload_sheet(self, sheet_name_or_index): """ :param sheet_name_or_index: Name or index of sheet to be unloaded. .. versionadded:: 0.7.1 """ if isinstance(sheet_name_or_index, int): sheetx = sheet_name_or_index else: try: sheetx = self._sheet_names.index(sheet_name_or_index) except ValueError: raise XLRDError('No sheet named <%r>' % sheet_name_or_index) self._sheet_list[sheetx] = None def release_resources(self): """ This method has a dual purpose. You can call it to release memory-consuming objects and (possibly) a memory-mapped file (:class:`mmap.mmap` object) when you have finished loading sheets in ``on_demand`` mode, but still require the :class:`Book` object to examine the loaded sheets. It is also called automatically (a) when :func:`~xlrd.open_workbook` raises an exception and (b) if you are using a ``with`` statement, when the ``with`` block is exited. 
Calling this method multiple times on the same object has no ill effect. """ self._resources_released = 1 if hasattr(self.mem, "close"): # must be a mmap.mmap object self.mem.close() self.mem = None if hasattr(self.filestr, "close"): self.filestr.close() self.filestr = None self._sharedstrings = None self._rich_text_runlist_map = None def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.release_resources() # return false #: A mapping from ``(lower_case_name, scope)`` to a single :class:`Name` #: object. #: #: .. versionadded:: 0.6.0 name_and_scope_map = {} #: A mapping from `lower_case_name` to a list of :class:`Name` objects. #: The list is sorted in scope order. Typically there will be one item #: (of global scope) in the list. #: #: .. versionadded:: 0.6.0 name_map = {} def __init__(self): self._sheet_list = [] self._sheet_names = [] self._sheet_visibility = [] # from BOUNDSHEET record self.nsheets = 0 self._sh_abs_posn = [] # sheet's absolute position in the stream self._sharedstrings = [] self._rich_text_runlist_map = {} self.raw_user_name = False self._sheethdr_count = 0 # BIFF 4W only self.builtinfmtcount = -1 # unknown as yet. 
BIFF 3, 4S, 4W self.initialise_format_info() self._all_sheets_count = 0 # includes macro & VBA sheets self._supbook_count = 0 self._supbook_locals_inx = None self._supbook_addins_inx = None self._all_sheets_map = [] # maps an all_sheets index to a calc-sheets index (or -1) self._externsheet_info = [] self._externsheet_type_b57 = [] self._extnsht_name_from_num = {} self._sheet_num_from_name = {} self._extnsht_count = 0 self._supbook_types = [] self._resources_released = 0 self.addin_func_names = [] self.name_obj_list = [] self.colour_map = {} self.palette_record = [] self.xf_list = [] self.style_name_map = {} self.mem = b'' self.filestr = b'' def biff2_8_load(self, filename=None, file_contents=None, logfile=sys.stdout, verbosity=0, use_mmap=True, encoding_override=None, formatting_info=False, on_demand=False, ragged_rows=False, ignore_workbook_corruption=False ): # DEBUG = 0 self.logfile = logfile self.verbosity = verbosity self.use_mmap = use_mmap self.encoding_override = encoding_override self.formatting_info = formatting_info self.on_demand = on_demand self.ragged_rows = ragged_rows if not file_contents: with open(filename, "rb") as f: f.seek(0, 2) # EOF size = f.tell() f.seek(0, 0) # BOF if size == 0: raise XLRDError("File size is 0 bytes") if self.use_mmap: self.filestr = mmap.mmap(f.fileno(), size, access=mmap.ACCESS_READ) self.stream_len = size else: self.filestr = f.read() self.stream_len = len(self.filestr) else: self.filestr = file_contents self.stream_len = len(file_contents) self.base = 0 if self.filestr[:8] != compdoc.SIGNATURE: # got this one at the antique store self.mem = self.filestr else: cd = compdoc.CompDoc(self.filestr, logfile=self.logfile, ignore_workbook_corruption=ignore_workbook_corruption) for qname in ['Workbook', 'Book']: self.mem, self.base, self.stream_len = \ cd.locate_named_stream(UNICODE_LITERAL(qname)) if self.mem: break else: raise XLRDError("Can't find workbook in OLE2 compound document") del cd if self.mem is not self.filestr: 
if hasattr(self.filestr, "close"): self.filestr.close() self.filestr = b'' self._position = self.base if DEBUG: print("mem: %s, base: %d, len: %d" % (type(self.mem), self.base, self.stream_len), file=self.logfile) def initialise_format_info(self): # needs to be done once per sheet for BIFF 4W :-( self.format_map = {} self.format_list = [] self.xfcount = 0 self.actualfmtcount = 0 # number of FORMAT records seen so far self._xf_index_to_xl_type_map = {0: XL_CELL_NUMBER} self._xf_epilogue_done = 0 self.xf_list = [] self.font_list = [] def get2bytes(self): pos = self._position buff_two = self.mem[pos:pos+2] lenbuff = len(buff_two) self._position += lenbuff if lenbuff < 2: return MY_EOF lo, hi = buff_two return (BYTES_ORD(hi) << 8) | BYTES_ORD(lo) def get_record_parts(self): pos = self._position mem = self.mem code, length = unpack('<HH', mem[pos:pos+4]) pos += 4 data = mem[pos:pos+length] self._position = pos + length return (code, length, data) def get_record_parts_conditional(self, reqd_record): pos = self._position mem = self.mem code, length = unpack('<HH', mem[pos:pos+4]) if code != reqd_record: return (None, 0, b'') pos += 4 data = mem[pos:pos+length] self._position = pos + length return (code, length, data) def get_sheet(self, sh_number, update_pos=True): if self._resources_released: raise XLRDError("Can't load sheets after releasing resources.") if update_pos: self._position = self._sh_abs_posn[sh_number] self.getbof(XL_WORKSHEET) # assert biff_version == self.biff_version ### FAILS # Have an example where book is v7 but sheet reports v8!!! # It appears to work OK if the sheet version is ignored. # Confirmed by Daniel Rentz: happens when Excel does "save as" # creating an old version file; ignore version details on sheet BOF. 
sh = sheet.Sheet( self, self._position, self._sheet_names[sh_number], sh_number, ) sh.read(self) self._sheet_list[sh_number] = sh return sh def get_sheets(self): # DEBUG = 0 if DEBUG: print("GET_SHEETS:", self._sheet_names, self._sh_abs_posn, file=self.logfile) for sheetno in xrange(len(self._sheet_names)): if DEBUG: print("GET_SHEETS: sheetno =", sheetno, self._sheet_names, self._sh_abs_posn, file=self.logfile) self.get_sheet(sheetno) def fake_globals_get_sheet(self): # for BIFF 4.0 and earlier formatting.initialise_book(self) fake_sheet_name = UNICODE_LITERAL('Sheet 1') self._sheet_names = [fake_sheet_name] self._sh_abs_posn = [0] self._sheet_visibility = [0] # one sheet, visible self._sheet_list.append(None) # get_sheet updates _sheet_list but needs a None beforehand self.get_sheets() def handle_boundsheet(self, data): # DEBUG = 1 bv = self.biff_version self.derive_encoding() if DEBUG: fprintf(self.logfile, "BOUNDSHEET: bv=%d data %r\n", bv, data) if bv == 45: # BIFF4W #### Not documented in OOo docs ... # In fact, the *only* data is the name of the sheet. 
sheet_name = unpack_string(data, 0, self.encoding, lenlen=1) visibility = 0 sheet_type = XL_BOUNDSHEET_WORKSHEET # guess, patch later if len(self._sh_abs_posn) == 0: abs_posn = self._sheetsoffset + self.base # Note (a) this won't be used # (b) it's the position of the SHEETHDR record # (c) add 11 to get to the worksheet BOF record else: abs_posn = -1 # unknown else: offset, visibility, sheet_type = unpack('<iBB', data[0:6]) abs_posn = offset + self.base # because global BOF is always at posn 0 in the stream if bv < BIFF_FIRST_UNICODE: sheet_name = unpack_string(data, 6, self.encoding, lenlen=1) else: sheet_name = unpack_unicode(data, 6, lenlen=1) if DEBUG or self.verbosity >= 2: fprintf(self.logfile, "BOUNDSHEET: inx=%d vis=%r sheet_name=%r abs_posn=%d sheet_type=0x%02x\n", self._all_sheets_count, visibility, sheet_name, abs_posn, sheet_type) self._all_sheets_count += 1 if sheet_type != XL_BOUNDSHEET_WORKSHEET: self._all_sheets_map.append(-1) descr = { 1: 'Macro sheet', 2: 'Chart', 6: 'Visual Basic module', }.get(sheet_type, 'UNKNOWN') if DEBUG or self.verbosity >= 1: fprintf(self.logfile, "NOTE *** Ignoring non-worksheet data named %r (type 0x%02x = %s)\n", sheet_name, sheet_type, descr) else: snum = len(self._sheet_names) self._all_sheets_map.append(snum) self._sheet_names.append(sheet_name) self._sh_abs_posn.append(abs_posn) self._sheet_visibility.append(visibility) self._sheet_num_from_name[sheet_name] = snum def handle_builtinfmtcount(self, data): ### N.B. This count appears to be utterly useless. 
# DEBUG = 1 builtinfmtcount = unpack('<H', data[0:2])[0] if DEBUG: fprintf(self.logfile, "BUILTINFMTCOUNT: %r\n", builtinfmtcount) self.builtinfmtcount = builtinfmtcount def derive_encoding(self): if self.encoding_override: self.encoding = self.encoding_override elif self.codepage is None: if self.biff_version < 80: fprintf(self.logfile, "*** No CODEPAGE record, no encoding_override: will use 'iso-8859-1'\n") self.encoding = 'iso-8859-1' else: self.codepage = 1200 # utf16le if self.verbosity >= 2: fprintf(self.logfile, "*** No CODEPAGE record; assuming 1200 (utf_16_le)\n") else: codepage = self.codepage if codepage in encoding_from_codepage: encoding = encoding_from_codepage[codepage] elif 300 <= codepage <= 1999: encoding = 'cp' + str(codepage) elif self.biff_version >= 80: self.codepage = 1200 encoding = 'utf_16_le' else: encoding = 'unknown_codepage_' + str(codepage) if DEBUG or (self.verbosity and encoding != self.encoding) : fprintf(self.logfile, "CODEPAGE: codepage %r -> encoding %r\n", codepage, encoding) self.encoding = encoding if self.codepage != 1200: # utf_16_le # If we don't have a codec that can decode ASCII into Unicode, # we're well & truly stuffed -- let the punter know ASAP. 
try: unicode(b'trial', self.encoding) except BaseException as e: fprintf(self.logfile, "ERROR *** codepage %r -> encoding %r -> %s: %s\n", self.codepage, self.encoding, type(e).__name__.split(".")[-1], e) raise if self.raw_user_name: strg = unpack_string(self.user_name, 0, self.encoding, lenlen=1) strg = strg.rstrip() # if DEBUG: # print "CODEPAGE: user name decoded from %r to %r" % (self.user_name, strg) self.user_name = strg self.raw_user_name = False return self.encoding def handle_codepage(self, data): # DEBUG = 0 codepage = unpack('<H', data[0:2])[0] self.codepage = codepage self.derive_encoding() def handle_country(self, data): countries = unpack('<HH', data[0:4]) if self.verbosity: print("Countries:", countries, file=self.logfile) # Note: in BIFF7 and earlier, country record was put (redundantly?) in each worksheet. assert self.countries == (0, 0) or self.countries == countries self.countries = countries def handle_datemode(self, data): datemode = unpack('<H', data[0:2])[0] if DEBUG or self.verbosity: fprintf(self.logfile, "DATEMODE: datemode %r\n", datemode) assert datemode in (0, 1) self.datemode = datemode def handle_externname(self, data): blah = DEBUG or self.verbosity >= 2 if self.biff_version >= 80: option_flags, other_info =unpack("<HI", data[:6]) pos = 6 name, pos = unpack_unicode_update_pos(data, pos, lenlen=1) extra = data[pos:] if self._supbook_types[-1] == SUPBOOK_ADDIN: self.addin_func_names.append(name) if blah: fprintf(self.logfile, "EXTERNNAME: sbktype=%d oflags=0x%04x oinfo=0x%08x name=%r extra=%r\n", self._supbook_types[-1], option_flags, other_info, name, extra) def handle_externsheet(self, data): self.derive_encoding() # in case CODEPAGE record missing/out of order/wrong self._extnsht_count += 1 # for use as a 1-based index blah1 = DEBUG or self.verbosity >= 1 blah2 = DEBUG or self.verbosity >= 2 if self.biff_version >= 80: num_refs = unpack("<H", data[0:2])[0] bytes_reqd = num_refs * 6 + 2 while len(data) < bytes_reqd: if blah1: 
fprintf( self.logfile, "INFO: EXTERNSHEET needs %d bytes, have %d\n", bytes_reqd, len(data), ) code2, length2, data2 = self.get_record_parts() if code2 != XL_CONTINUE: raise XLRDError("Missing CONTINUE after EXTERNSHEET record") data += data2 pos = 2 for k in xrange(num_refs): info = unpack("<HHH", data[pos:pos+6]) ref_recordx, ref_first_sheetx, ref_last_sheetx = info self._externsheet_info.append(info) pos += 6 if blah2: fprintf( self.logfile, "EXTERNSHEET(b8): k = %2d, record = %2d, first_sheet = %5d, last sheet = %5d\n", k, ref_recordx, ref_first_sheetx, ref_last_sheetx, ) else: nc, ty = unpack("<BB", data[:2]) if blah2: print("EXTERNSHEET(b7-):", file=self.logfile) hex_char_dump(data, 0, len(data), fout=self.logfile) msg = { 1: "Encoded URL", 2: "Current sheet!!", 3: "Specific sheet in own doc't", 4: "Nonspecific sheet in own doc't!!", }.get(ty, "Not encoded") print(" %3d chars, type is %d (%s)" % (nc, ty, msg), file=self.logfile) if ty == 3: sheet_name = unicode(data[2:nc+2], self.encoding) self._extnsht_name_from_num[self._extnsht_count] = sheet_name if blah2: print(self._extnsht_name_from_num, file=self.logfile) if not (1 <= ty <= 4): ty = 0 self._externsheet_type_b57.append(ty) def handle_filepass(self, data): if self.verbosity >= 2: logf = self.logfile fprintf(logf, "FILEPASS:\n") hex_char_dump(data, 0, len(data), base=0, fout=logf) if self.biff_version >= 80: kind1, = unpack('<H', data[:2]) if kind1 == 0: # weak XOR encryption key, hash_value = unpack('<HH', data[2:]) fprintf(logf, 'weak XOR: key=0x%04x hash=0x%04x\n', key, hash_value) elif kind1 == 1: kind2, = unpack('<H', data[4:6]) if kind2 == 1: # BIFF8 standard encryption caption = "BIFF8 std" elif kind2 == 2: caption = "BIFF8 strong" else: caption = "** UNKNOWN ENCRYPTION METHOD **" fprintf(logf, "%s\n", caption) raise XLRDError("Workbook is encrypted") def handle_name(self, data): blah = DEBUG or self.verbosity >= 2 bv = self.biff_version if bv < 50: return self.derive_encoding() # print # 
hex_char_dump(data, 0, len(data), fout=self.logfile) ( option_flags, kb_shortcut, name_len, fmla_len, extsht_index, sheet_index, menu_text_len, description_text_len, help_topic_text_len, status_bar_text_len, ) = unpack("<HBBHHH4B", data[0:14]) nobj = Name() nobj.book = self ### CIRCULAR ### name_index = len(self.name_obj_list) nobj.name_index = name_index self.name_obj_list.append(nobj) nobj.option_flags = option_flags attrs = [ ('hidden', 1, 0), ('func', 2, 1), ('vbasic', 4, 2), ('macro', 8, 3), ('complex', 0x10, 4), ('builtin', 0x20, 5), ('funcgroup', 0xFC0, 6), ('binary', 0x1000, 12), ] for attr, mask, nshift in attrs: setattr(nobj, attr, (option_flags & mask) >> nshift) macro_flag = " M"[nobj.macro] if bv < 80: internal_name, pos = unpack_string_update_pos(data, 14, self.encoding, known_len=name_len) else: internal_name, pos = unpack_unicode_update_pos(data, 14, known_len=name_len) nobj.extn_sheet_num = extsht_index nobj.excel_sheet_index = sheet_index nobj.scope = None # patched up in the names_epilogue() method if blah: fprintf( self.logfile, "NAME[%d]:%s oflags=%d, name_len=%d, fmla_len=%d, extsht_index=%d, sheet_index=%d, name=%r\n", name_index, macro_flag, option_flags, name_len, fmla_len, extsht_index, sheet_index, internal_name) name = internal_name if nobj.builtin: name = builtin_name_from_code.get(name, "??Unknown??") if blah: print(" builtin: %s" % name, file=self.logfile) nobj.name = name nobj.raw_formula = data[pos:] nobj.basic_formula_len = fmla_len nobj.evaluated = 0 if blah: nobj.dump( self.logfile, header="--- handle_name: name[%d] ---" % name_index, footer="-------------------", ) def names_epilogue(self): blah = self.verbosity >= 2 f = self.logfile if blah: print("+++++ names_epilogue +++++", file=f) print("_all_sheets_map", REPR(self._all_sheets_map), file=f) print("_extnsht_name_from_num", REPR(self._extnsht_name_from_num), file=f) print("_sheet_num_from_name", REPR(self._sheet_num_from_name), file=f) num_names = len(self.name_obj_list) for 
namex in range(num_names): nobj = self.name_obj_list[namex] # Convert from excel_sheet_index to scope. # This is done here because in BIFF7 and earlier, the # BOUNDSHEET records (from which _all_sheets_map is derived) # come after the NAME records. if self.biff_version >= 80: sheet_index = nobj.excel_sheet_index if sheet_index == 0: intl_sheet_index = -1 # global elif 1 <= sheet_index <= len(self._all_sheets_map): intl_sheet_index = self._all_sheets_map[sheet_index-1] if intl_sheet_index == -1: # maps to a macro or VBA sheet intl_sheet_index = -2 # valid sheet reference but not useful else: # huh? intl_sheet_index = -3 # invalid elif 50 <= self.biff_version <= 70: sheet_index = nobj.extn_sheet_num if sheet_index == 0: intl_sheet_index = -1 # global else: sheet_name = self._extnsht_name_from_num[sheet_index] intl_sheet_index = self._sheet_num_from_name.get(sheet_name, -2) nobj.scope = intl_sheet_index for namex in range(num_names): nobj = self.name_obj_list[namex] # Parse the formula ... 
if nobj.macro or nobj.binary: continue if nobj.evaluated: continue evaluate_name_formula(self, nobj, namex, blah=blah) if self.verbosity >= 2: print("---------- name object dump ----------", file=f) for namex in range(num_names): nobj = self.name_obj_list[namex] nobj.dump(f, header="--- name[%d] ---" % namex) print("--------------------------------------", file=f) # # Build some dicts for access to the name objects # name_and_scope_map = {} # (name.lower(), scope): Name_object name_map = {} # name.lower() : list of Name_objects (sorted in scope order) for namex in range(num_names): nobj = self.name_obj_list[namex] name_lcase = nobj.name.lower() key = (name_lcase, nobj.scope) if key in name_and_scope_map and self.verbosity: fprintf(f, 'Duplicate entry %r in name_and_scope_map\n', key) name_and_scope_map[key] = nobj sort_data = (nobj.scope, namex, nobj) # namex (a temp unique ID) ensures the Name objects will not # be compared (fatal in py3) if name_lcase in name_map: name_map[name_lcase].append(sort_data) else: name_map[name_lcase] = [sort_data] for key in name_map.keys(): alist = name_map[key] alist.sort() name_map[key] = [x[2] for x in alist] self.name_and_scope_map = name_and_scope_map self.name_map = name_map def handle_obj(self, data): # Not doing much handling at all. # Worrying about embedded (BOF ... EOF) substreams is done elsewhere. 
# DEBUG = 1 obj_type, obj_id = unpack('<HI', data[4:10]) # if DEBUG: print "---> handle_obj type=%d id=0x%08x" % (obj_type, obj_id) def handle_supbook(self, data): # aka EXTERNALBOOK in OOo docs self._supbook_types.append(None) blah = DEBUG or self.verbosity >= 2 if blah: print("SUPBOOK:", file=self.logfile) hex_char_dump(data, 0, len(data), fout=self.logfile) num_sheets = unpack("<H", data[0:2])[0] if blah: print("num_sheets = %d" % num_sheets, file=self.logfile) sbn = self._supbook_count self._supbook_count += 1 if data[2:4] == b"\x01\x04": self._supbook_types[-1] = SUPBOOK_INTERNAL self._supbook_locals_inx = self._supbook_count - 1 if blah: print("SUPBOOK[%d]: internal 3D refs; %d sheets" % (sbn, num_sheets), file=self.logfile) print(" _all_sheets_map", self._all_sheets_map, file=self.logfile) return if data[0:4] == b"\x01\x00\x01\x3A": self._supbook_types[-1] = SUPBOOK_ADDIN self._supbook_addins_inx = self._supbook_count - 1 if blah: print("SUPBOOK[%d]: add-in functions" % sbn, file=self.logfile) return url, pos = unpack_unicode_update_pos(data, 2, lenlen=2) if num_sheets == 0: self._supbook_types[-1] = SUPBOOK_DDEOLE if blah: fprintf(self.logfile, "SUPBOOK[%d]: DDE/OLE document = %r\n", sbn, url) return self._supbook_types[-1] = SUPBOOK_EXTERNAL if blah: fprintf(self.logfile, "SUPBOOK[%d]: url = %r\n", sbn, url) sheet_names = [] for x in range(num_sheets): try: shname, pos = unpack_unicode_update_pos(data, pos, lenlen=2) except struct.error: # #### FIX ME #### # Should implement handling of CONTINUE record(s) ... if self.verbosity: print( "*** WARNING: unpack failure in sheet %d of %d in SUPBOOK record for file %r" % (x, num_sheets, url), file=self.logfile, ) break sheet_names.append(shname) if blah: fprintf(self.logfile, " sheetx=%d namelen=%d name=%r (next pos=%d)\n", x, len(shname), shname, pos) def handle_sheethdr(self, data): # This a BIFF 4W special. # The SHEETHDR record is followed by a (BOF ... EOF) substream containing # a worksheet. 
# DEBUG = 1 self.derive_encoding() sheet_len = unpack('<i', data[:4])[0] sheet_name = unpack_string(data, 4, self.encoding, lenlen=1) sheetno = self._sheethdr_count assert sheet_name == self._sheet_names[sheetno] self._sheethdr_count += 1 BOF_posn = self._position posn = BOF_posn - 4 - len(data) if DEBUG: fprintf(self.logfile, 'SHEETHDR %d at posn %d: len=%d name=%r\n', sheetno, posn, sheet_len, sheet_name) self.initialise_format_info() if DEBUG: print('SHEETHDR: xf epilogue flag is %d' % self._xf_epilogue_done, file=self.logfile) self._sheet_list.append(None) # get_sheet updates _sheet_list but needs a None beforehand self.get_sheet(sheetno, update_pos=False) if DEBUG: print('SHEETHDR: posn after get_sheet() =', self._position, file=self.logfile) self._position = BOF_posn + sheet_len def handle_sheetsoffset(self, data): # DEBUG = 0 posn = unpack('<i', data)[0] if DEBUG: print('SHEETSOFFSET:', posn, file=self.logfile) self._sheetsoffset = posn def handle_sst(self, data): # DEBUG = 1 if DEBUG: print("SST Processing", file=self.logfile) t0 = perf_counter() nbt = len(data) strlist = [data] uniquestrings = unpack('<i', data[4:8])[0] if DEBUG or self.verbosity >= 2: fprintf(self.logfile, "SST: unique strings: %d\n", uniquestrings) while 1: code, nb, data = self.get_record_parts_conditional(XL_CONTINUE) if code is None: break nbt += nb if DEBUG >= 2: fprintf(self.logfile, "CONTINUE: adding %d bytes to SST -> %d\n", nb, nbt) strlist.append(data) self._sharedstrings, rt_runlist = unpack_SST_table(strlist, uniquestrings) if self.formatting_info: self._rich_text_runlist_map = rt_runlist if DEBUG: t1 = perf_counter() print("SST processing took %.2f seconds" % (t1 - t0, ), file=self.logfile) def handle_writeaccess(self, data): DEBUG = 0 if self.biff_version < 80: if not self.encoding: self.raw_user_name = True self.user_name = data return strg = unpack_string(data, 0, self.encoding, lenlen=1) else: try: strg = unpack_unicode(data, 0, lenlen=2) except UnicodeDecodeError: # may 
have invalid trailing characters strg = unpack_unicode(data.strip(), 0, lenlen=2) if DEBUG: fprintf(self.logfile, "WRITEACCESS: %d bytes; raw=%s %r\n", len(data), self.raw_user_name, strg) strg = strg.rstrip() self.user_name = strg def parse_globals(self): # DEBUG = 0 # no need to position, just start reading (after the BOF) formatting.initialise_book(self) while 1: rc, length, data = self.get_record_parts() if DEBUG: print("parse_globals: record code is 0x%04x" % rc, file=self.logfile) if rc == XL_SST: self.handle_sst(data) elif rc == XL_FONT or rc == XL_FONT_B3B4: self.handle_font(data) elif rc == XL_FORMAT: # XL_FORMAT2 is BIFF <= 3.0, can't appear in globals self.handle_format(data) elif rc == XL_XF: self.handle_xf(data) elif rc == XL_BOUNDSHEET: self.handle_boundsheet(data) elif rc == XL_DATEMODE: self.handle_datemode(data) elif rc == XL_CODEPAGE: self.handle_codepage(data) elif rc == XL_COUNTRY: self.handle_country(data) elif rc == XL_EXTERNNAME: self.handle_externname(data) elif rc == XL_EXTERNSHEET: self.handle_externsheet(data) elif rc == XL_FILEPASS: self.handle_filepass(data) elif rc == XL_WRITEACCESS: self.handle_writeaccess(data) elif rc == XL_SHEETSOFFSET: self.handle_sheetsoffset(data) elif rc == XL_SHEETHDR: self.handle_sheethdr(data) elif rc == XL_SUPBOOK: self.handle_supbook(data) elif rc == XL_NAME: self.handle_name(data) elif rc == XL_PALETTE: self.handle_palette(data) elif rc == XL_STYLE: self.handle_style(data) elif rc & 0xff == 9 and self.verbosity: fprintf(self.logfile, "*** Unexpected BOF at posn %d: 0x%04x len=%d data=%r\n", self._position - length - 4, rc, length, data) elif rc == XL_EOF: self.xf_epilogue() self.names_epilogue() self.palette_epilogue() if not self.encoding: self.derive_encoding() if self.biff_version == 45: # DEBUG = 0 if DEBUG: print("global EOF: position", self._position, file=self.logfile) # if DEBUG: # pos = self._position - 4 # print repr(self.mem[pos:pos+40]) return else: # if DEBUG: # print >> self.logfile, 
"parse_globals: ignoring record code 0x%04x" % rc pass def read(self, pos, length): data = self.mem[pos:pos+length] self._position = pos + len(data) return data def getbof(self, rqd_stream): # DEBUG = 1 # if DEBUG: print >> self.logfile, "getbof(): position", self._position if DEBUG: print("reqd: 0x%04x" % rqd_stream, file=self.logfile) def bof_error(msg): raise XLRDError('Unsupported format, or corrupt file: ' + msg) savpos = self._position opcode = self.get2bytes() if opcode == MY_EOF: bof_error('Expected BOF record; met end of file') if opcode not in bofcodes: bof_error('Expected BOF record; found %r' % self.mem[savpos:savpos+8]) length = self.get2bytes() if length == MY_EOF: bof_error('Incomplete BOF record[1]; met end of file') if not (4 <= length <= 20): bof_error( 'Invalid length (%d) for BOF record type 0x%04x' % (length, opcode)) padding = b'\0' * max(0, boflen[opcode] - length) data = self.read(self._position, length) if DEBUG: fprintf(self.logfile, "\ngetbof(): data=%r\n", data) if len(data) < length: bof_error('Incomplete BOF record[2]; met end of file') data += padding version1 = opcode >> 8 version2, streamtype = unpack('<HH', data[0:4]) if DEBUG: print("getbof(): op=0x%04x version2=0x%04x streamtype=0x%04x" % (opcode, version2, streamtype), file=self.logfile) bof_offset = self._position - 4 - length if DEBUG: print("getbof(): BOF found at offset %d; savpos=%d" % (bof_offset, savpos), file=self.logfile) version = build = year = 0 if version1 == 0x08: build, year = unpack('<HH', data[4:8]) if version2 == 0x0600: version = 80 elif version2 == 0x0500: if year < 1994 or build in (2412, 3218, 3321): version = 50 else: version = 70 else: # dodgy one, created by a 3rd-party tool version = { 0x0000: 21, 0x0007: 21, 0x0200: 21, 0x0300: 30, 0x0400: 40, }.get(version2, 0) elif version1 in (0x04, 0x02, 0x00): version = {0x04: 40, 0x02: 30, 0x00: 21}[version1] if version == 40 and streamtype == XL_WORKBOOK_GLOBALS_4W: version = 45 # i.e. 
4W if DEBUG or self.verbosity >= 2: print("BOF: op=0x%04x vers=0x%04x stream=0x%04x buildid=%d buildyr=%d -> BIFF%d" % (opcode, version2, streamtype, build, year, version), file=self.logfile) got_globals = streamtype == XL_WORKBOOK_GLOBALS or ( version == 45 and streamtype == XL_WORKBOOK_GLOBALS_4W) if (rqd_stream == XL_WORKBOOK_GLOBALS and got_globals) or streamtype == rqd_stream: return version if version < 50 and streamtype == XL_WORKSHEET: return version if version >= 50 and streamtype == 0x0100: bof_error("Workspace file -- no spreadsheet data") bof_error( 'BOF not workbook/worksheet: op=0x%04x vers=0x%04x strm=0x%04x build=%d year=%d -> BIFF%d' % (opcode, version2, streamtype, build, year, version) ) # === helper functions def expand_cell_address(inrow, incol): # Ref : OOo docs, "4.3.4 Cell Addresses in BIFF8" outrow = inrow if incol & 0x8000: if outrow >= 32768: outrow -= 65536 relrow = 1 else: relrow = 0 outcol = incol & 0xFF if incol & 0x4000: if outcol >= 128: outcol -= 256 relcol = 1 else: relcol = 0 return outrow, outcol, relrow, relcol def colname(colx, _A2Z="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): assert colx >= 0 name = UNICODE_LITERAL('') while 1: quot, rem = divmod(colx, 26) name = _A2Z[rem] + name if not quot: return name colx = quot - 1 def display_cell_address(rowx, colx, relrow, relcol): if relrow: rowpart = "(*%s%d)" % ("+-"[rowx < 0], abs(rowx)) else: rowpart = "$%d" % (rowx+1,) if relcol: colpart = "(*%s%d)" % ("+-"[colx < 0], abs(colx)) else: colpart = "$" + colname(colx) return colpart + rowpart def unpack_SST_table(datatab, nstrings): "Return list of strings" datainx = 0 ndatas = len(datatab) data = datatab[0] datalen = len(data) pos = 8 strings = [] strappend = strings.append richtext_runs = {} local_unpack = unpack local_min = min local_BYTES_ORD = BYTES_ORD latin_1 = "latin_1" for _unused_i in xrange(nstrings): nchars = local_unpack('<H', data[pos:pos+2])[0] pos += 2 options = local_BYTES_ORD(data[pos]) pos += 1 rtcount = 0 phosz = 0 if options 
& 0x08: # richtext rtcount = local_unpack('<H', data[pos:pos+2])[0] pos += 2 if options & 0x04: # phonetic phosz = local_unpack('<i', data[pos:pos+4])[0] pos += 4 accstrg = UNICODE_LITERAL('') charsgot = 0 while 1: charsneed = nchars - charsgot if options & 0x01: # Uncompressed UTF-16 charsavail = local_min((datalen - pos) >> 1, charsneed) rawstrg = data[pos:pos+2*charsavail] # if DEBUG: print "SST U16: nchars=%d pos=%d rawstrg=%r" % (nchars, pos, rawstrg) try: accstrg += unicode(rawstrg, "utf_16_le") except: # print "SST U16: nchars=%d pos=%d rawstrg=%r" % (nchars, pos, rawstrg) # Probable cause: dodgy data e.g. unfinished surrogate pair. # E.g. file unicode2.xls in pyExcelerator's examples has cells containing # unichr(i) for i in range(0x100000) # so this will include 0xD800 etc raise pos += 2*charsavail else: # Note: this is COMPRESSED (not ASCII!) encoding!!! charsavail = local_min(datalen - pos, charsneed) rawstrg = data[pos:pos+charsavail] # if DEBUG: print "SST CMPRSD: nchars=%d pos=%d rawstrg=%r" % (nchars, pos, rawstrg) accstrg += unicode(rawstrg, latin_1) pos += charsavail charsgot += charsavail if charsgot == nchars: break datainx += 1 data = datatab[datainx] datalen = len(data) options = local_BYTES_ORD(data[0]) pos = 1 if rtcount: runs = [] for runindex in xrange(rtcount): if pos == datalen: pos = 0 datainx += 1 data = datatab[datainx] datalen = len(data) runs.append(local_unpack("<HH", data[pos:pos+4])) pos += 4 richtext_runs[len(strings)] = runs pos += phosz # size of the phonetic stuff to skip if pos >= datalen: # adjust to correct position in next record pos = pos - datalen datainx += 1 if datainx < ndatas: data = datatab[datainx] datalen = len(data) else: assert _unused_i == nstrings - 1 strappend(accstrg) return strings, richtext_runs
Book
python
pypa__setuptools
setuptools/_vendor/wheel/wheelfile.py
{ "start": 1348, "end": 8411 }
class ____(ZipFile): """A ZipFile derivative class that also reads SHA-256 hashes from .dist-info/RECORD and checks any read files against those. """ _default_algorithm = hashlib.sha256 def __init__( self, file: StrPath, mode: Literal["r", "w", "x", "a"] = "r", compression: int = ZIP_DEFLATED, ): basename = os.path.basename(file) self.parsed_filename = WHEEL_INFO_RE.match(basename) if not basename.endswith(".whl") or self.parsed_filename is None: raise WheelError(f"Bad wheel filename {basename!r}") ZipFile.__init__(self, file, mode, compression=compression, allowZip64=True) self.dist_info_path = "{}.dist-info".format( self.parsed_filename.group("namever") ) self.record_path = self.dist_info_path + "/RECORD" self._file_hashes: dict[str, tuple[None, None] | tuple[int, bytes]] = {} self._file_sizes = {} if mode == "r": # Ignore RECORD and any embedded wheel signatures self._file_hashes[self.record_path] = None, None self._file_hashes[self.record_path + ".jws"] = None, None self._file_hashes[self.record_path + ".p7s"] = None, None # Fill in the expected hashes by reading them from RECORD try: record = self.open(self.record_path) except KeyError: raise WheelError(f"Missing {self.record_path} file") from None with record: for line in csv.reader( TextIOWrapper(record, newline="", encoding="utf-8") ): path, hash_sum, size = line if not hash_sum: continue algorithm, hash_sum = hash_sum.split("=") try: hashlib.new(algorithm) except ValueError: raise WheelError( f"Unsupported hash algorithm: {algorithm}" ) from None if algorithm.lower() in {"md5", "sha1"}: raise WheelError( f"Weak hash algorithm ({algorithm}) is not permitted by " f"PEP 427" ) self._file_hashes[path] = ( algorithm, urlsafe_b64decode(hash_sum.encode("ascii")), ) def open( self, name_or_info: str | ZipInfo, mode: Literal["r", "w"] = "r", pwd: bytes | None = None, ) -> IO[bytes]: def _update_crc(newdata: bytes) -> None: eof = ef._eof update_crc_orig(newdata) running_hash.update(newdata) if eof and 
running_hash.digest() != expected_hash: raise WheelError(f"Hash mismatch for file '{ef_name}'") ef_name = ( name_or_info.filename if isinstance(name_or_info, ZipInfo) else name_or_info ) if ( mode == "r" and not ef_name.endswith("/") and ef_name not in self._file_hashes ): raise WheelError(f"No hash found for file '{ef_name}'") ef = ZipFile.open(self, name_or_info, mode, pwd) if mode == "r" and not ef_name.endswith("/"): algorithm, expected_hash = self._file_hashes[ef_name] if expected_hash is not None: # Monkey patch the _update_crc method to also check for the hash from # RECORD running_hash = hashlib.new(algorithm) update_crc_orig, ef._update_crc = ef._update_crc, _update_crc return ef def write_files(self, base_dir: str): log.info(f"creating '{self.filename}' and adding '{base_dir}' to it") deferred: list[tuple[str, str]] = [] for root, dirnames, filenames in os.walk(base_dir): # Sort the directory names so that `os.walk` will walk them in a # defined order on the next iteration. dirnames.sort() for name in sorted(filenames): path = os.path.normpath(os.path.join(root, name)) if os.path.isfile(path): arcname = os.path.relpath(path, base_dir).replace(os.path.sep, "/") if arcname == self.record_path: pass elif root.endswith(".dist-info"): deferred.append((path, arcname)) else: self.write(path, arcname) deferred.sort() for path, arcname in deferred: self.write(path, arcname) def write( self, filename: str, arcname: str | None = None, compress_type: int | None = None, ) -> None: with open(filename, "rb") as f: st = os.fstat(f.fileno()) data = f.read() zinfo = ZipInfo( arcname or filename, date_time=get_zipinfo_datetime(st.st_mtime) ) zinfo.external_attr = (stat.S_IMODE(st.st_mode) | stat.S_IFMT(st.st_mode)) << 16 zinfo.compress_type = compress_type or self.compression self.writestr(zinfo, data, compress_type) def writestr( self, zinfo_or_arcname: str | ZipInfo, data: SizedBuffer | str, compress_type: int | None = None, ): if isinstance(zinfo_or_arcname, str): 
zinfo_or_arcname = ZipInfo( zinfo_or_arcname, date_time=get_zipinfo_datetime() ) zinfo_or_arcname.compress_type = self.compression zinfo_or_arcname.external_attr = (0o664 | stat.S_IFREG) << 16 if isinstance(data, str): data = data.encode("utf-8") ZipFile.writestr(self, zinfo_or_arcname, data, compress_type) fname = ( zinfo_or_arcname.filename if isinstance(zinfo_or_arcname, ZipInfo) else zinfo_or_arcname ) log.info(f"adding '{fname}'") if fname != self.record_path: hash_ = self._default_algorithm(data) self._file_hashes[fname] = ( hash_.name, urlsafe_b64encode(hash_.digest()).decode("ascii"), ) self._file_sizes[fname] = len(data) def close(self): # Write RECORD if self.fp is not None and self.mode == "w" and self._file_hashes: data = StringIO() writer = csv.writer(data, delimiter=",", quotechar='"', lineterminator="\n") writer.writerows( ( (fname, algorithm + "=" + hash_, self._file_sizes[fname]) for fname, (algorithm, hash_) in self._file_hashes.items() ) ) writer.writerow((format(self.record_path), "", "")) self.writestr(self.record_path, data.getvalue()) ZipFile.close(self)
WheelFile
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_cloud_build.py
{ "start": 3420, "end": 12202 }
class ____: @mock.patch(CLOUD_BUILD_HOOK_PATH) def test_cancel_build(self, mock_hook): mock_hook.return_value.cancel_build.return_value = Build() operator = CloudBuildCancelBuildOperator(id_=TRIGGER_ID, task_id="id") operator.execute(context=mock.MagicMock()) mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None) mock_hook.return_value.cancel_build.assert_called_once_with( id_=TRIGGER_ID, project_id=None, retry=DEFAULT, timeout=None, metadata=(), location="global" ) @mock.patch(CLOUD_BUILD_HOOK_PATH) def test_create_build(self, mock_hook): mock_hook.return_value.create_build_without_waiting_for_result.return_value = (BUILD, BUILD_ID) mock_hook.return_value.wait_for_operation.return_value = Build() operator = CloudBuildCreateBuildOperator(build=BUILD, task_id="id") operator.execute(context=mock.MagicMock()) mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None) build = Build(BUILD) mock_hook.return_value.create_build_without_waiting_for_result.assert_called_once_with( build=build, project_id=None, retry=DEFAULT, timeout=None, metadata=(), location="global" ) mock_hook.return_value.wait_for_operation.assert_called_once_with(timeout=None, operation=BUILD) @mock.patch(CLOUD_BUILD_HOOK_PATH) def test_create_build_with_missing_build(self, mock_hook): mock_hook.return_value.create_build_without_waiting_for_result.return_value = Build() with pytest.raises((TypeError, AirflowException), match="missing keyword argument 'build'"): CloudBuildCreateBuildOperator(task_id="id") @pytest.mark.parametrize( ("file_type", "file_content"), [ ( ".json", json.dumps({"steps": [{"name": "ubuntu", "args": ["echo", "Hello {{ params.name }}!"]}]}), ), ( ".yaml", """ steps: - name: 'ubuntu' args: ['echo', 'Hello {{ params.name }}!'] """, ), ], ) def test_load_templated(self, file_type, file_content): with tempfile.NamedTemporaryFile(suffix=file_type, mode="w+") as f: f.writelines(file_content) f.flush() operator = 
CloudBuildCreateBuildOperator( build=f.name, task_id="task-id", params={"name": "airflow"} ) operator.prepare_template() expected_body = {"steps": [{"name": "ubuntu", "args": ["echo", "Hello {{ params.name }}!"]}]} assert expected_body == operator.build @mock.patch(CLOUD_BUILD_HOOK_PATH) def test_create_build_trigger(self, mock_hook): mock_hook.return_value.create_build_trigger.return_value = BuildTrigger() operator = CloudBuildCreateBuildTriggerOperator(trigger=BUILD_TRIGGER, task_id="id") operator.execute(context=mock.MagicMock()) mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None) mock_hook.return_value.create_build_trigger.assert_called_once_with( trigger=BUILD_TRIGGER, project_id=None, retry=DEFAULT, timeout=None, metadata=(), location="global", ) @mock.patch(CLOUD_BUILD_HOOK_PATH) def test_delete_build_trigger(self, mock_hook): mock_hook.return_value.delete_build_trigger.return_value = None operator = CloudBuildDeleteBuildTriggerOperator(trigger_id=TRIGGER_ID, task_id="id") operator.execute(context=mock.MagicMock()) mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None) mock_hook.return_value.delete_build_trigger.assert_called_once_with( trigger_id=TRIGGER_ID, project_id=None, retry=DEFAULT, timeout=None, metadata=(), location="global", ) @mock.patch(CLOUD_BUILD_HOOK_PATH) def test_get_build(self, mock_hook): mock_hook.return_value.get_build.return_value = Build() operator = CloudBuildGetBuildOperator(id_=BUILD_ID, task_id="id") operator.execute(context=mock.MagicMock()) mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None) mock_hook.return_value.get_build.assert_called_once_with( id_=BUILD_ID, project_id=None, retry=DEFAULT, timeout=None, metadata=(), location="global" ) @mock.patch(CLOUD_BUILD_HOOK_PATH) def test_get_build_trigger(self, mock_hook): mock_hook.return_value.get_build_trigger.return_value = BuildTrigger() operator = 
CloudBuildGetBuildTriggerOperator(trigger_id=TRIGGER_ID, task_id="id") operator.execute(context=mock.MagicMock()) mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None) mock_hook.return_value.get_build_trigger.assert_called_once_with( trigger_id=TRIGGER_ID, project_id=None, retry=DEFAULT, timeout=None, metadata=(), location="global", ) @mock.patch(CLOUD_BUILD_HOOK_PATH) def test_list_build_triggers(self, mock_hook): mock_hook.return_value.list_build_triggers.return_value = mock.MagicMock() operator = CloudBuildListBuildTriggersOperator(task_id="id", location="global") operator.execute(context=mock.MagicMock()) mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None) mock_hook.return_value.list_build_triggers.assert_called_once_with( project_id=None, location="global", page_size=None, page_token=None, retry=DEFAULT, timeout=None, metadata=(), ) @mock.patch(CLOUD_BUILD_HOOK_PATH) def test_list_builds(self, mock_hook): mock_hook.return_value.list_builds.return_value = mock.MagicMock() operator = CloudBuildListBuildsOperator(task_id="id", location="global") operator.execute(context=mock.MagicMock()) mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None) mock_hook.return_value.list_builds.assert_called_once_with( project_id=None, location="global", page_size=None, filter_=None, retry=DEFAULT, timeout=None, metadata=(), ) @mock.patch(CLOUD_BUILD_HOOK_PATH) def test_retry_build(self, mock_hook): mock_hook.return_value.retry_build.return_value = Build() operator = CloudBuildRetryBuildOperator(id_=BUILD_ID, task_id="id") operator.execute(context=mock.MagicMock()) mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None) mock_hook.return_value.retry_build.assert_called_once_with( id_=BUILD_ID, project_id=None, wait=True, retry=DEFAULT, timeout=None, metadata=(), location="global", ) @mock.patch(CLOUD_BUILD_HOOK_PATH) def test_run_build_trigger(self, mock_hook): 
mock_hook.return_value.run_build_trigger.return_value = Build() operator = CloudBuildRunBuildTriggerOperator(trigger_id=TRIGGER_ID, source=REPO_SOURCE, task_id="id") operator.execute(context=mock.MagicMock()) mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None) mock_hook.return_value.run_build_trigger.assert_called_once_with( trigger_id=TRIGGER_ID, source=REPO_SOURCE, project_id=None, wait=True, retry=DEFAULT, timeout=None, metadata=(), location="global", ) @mock.patch(CLOUD_BUILD_HOOK_PATH) def test_update_build_trigger(self, mock_hook): mock_hook.return_value.update_build_trigger.return_value = BuildTrigger() operator = CloudBuildUpdateBuildTriggerOperator( trigger_id=TRIGGER_ID, trigger=BUILD_TRIGGER, task_id="id" ) operator.execute(context=mock.MagicMock()) mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None) mock_hook.return_value.update_build_trigger.assert_called_once_with( trigger_id=TRIGGER_ID, trigger=BUILD_TRIGGER, project_id=None, retry=DEFAULT, timeout=None, metadata=(), location="global", )
TestCloudBuildOperator
python
huggingface__transformers
src/transformers/models/bridgetower/modeling_bridgetower.py
{ "start": 22512, "end": 26126 }
class ____(nn.Module): def __init__(self, config, is_causal=False, layer_idx=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.config = config self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.scaling = self.attention_head_size**-0.5 self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.is_causal = is_causal self.layer_idx = layer_idx def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[EncoderDecoderCache] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor]: # determine input shapes bsz, tgt_len = hidden_states.shape[:-1] src_len = encoder_hidden_states.shape[1] q_input_shape = (bsz, tgt_len, -1, self.attention_head_size) kv_input_shape = (bsz, src_len, -1, self.attention_head_size) # get query proj query_layer = self.query(hidden_states).view(*q_input_shape).transpose(1, 2) is_updated = past_key_values.is_updated.get(self.layer_idx) if past_key_values is not None else False if past_key_values is not None and is_updated: # reuse k,v, cross_attentions key_layer = past_key_values.cross_attention_cache.layers[self.layer_idx].keys value_layer = past_key_values.cross_attention_cache.layers[self.layer_idx].values else: key_layer = self.key(encoder_hidden_states).view(*kv_input_shape).transpose(1, 2) value_layer = 
self.value(encoder_hidden_states).view(*kv_input_shape).transpose(1, 2) if past_key_values is not None: # save all states to the cache key_layer, value_layer = past_key_values.cross_attention_cache.update( key_layer, value_layer, self.layer_idx ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls past_key_values.is_updated[self.layer_idx] = True attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_layer, key_layer, value_layer, attention_mask, dropout=0.0 if not self.training else self.dropout.p, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() return attn_output, attn_weights # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->BridgeTower,BERT->BRIDGE_TOWER
BridgeTowerCrossAttention
python
apache__airflow
task-sdk/src/airflow/sdk/api/datamodels/_generated.py
{ "start": 7022, "end": 7456 }
class ____(BaseModel): """ Schema for updating TaskInstance to a up_for_reschedule state. """ model_config = ConfigDict( extra="forbid", ) state: Annotated[Literal["up_for_reschedule"] | None, Field(title="State")] = "up_for_reschedule" reschedule_date: Annotated[AwareDatetime, Field(title="Reschedule Date")] end_date: Annotated[AwareDatetime, Field(title="End Date")]
TIRescheduleStatePayload
python
PrefectHQ__prefect
src/prefect/client/schemas/objects.py
{ "start": 25462, "end": 29627 }
class ____(TimeSeriesBaseModel, ObjectBaseModel): name: str = Field( default_factory=lambda: generate_slug(2), examples=["my-task-run"] ) flow_run_id: Optional[UUID] = Field( default=None, description="The flow run id of the task run." ) task_key: str = Field( default=..., description="A unique identifier for the task being run." ) dynamic_key: str = Field( default=..., description=( "A dynamic key used to differentiate between multiple runs of the same task" " within the same flow run." ), ) cache_key: Optional[str] = Field( default=None, description=( "An optional cache key. If a COMPLETED state associated with this cache key" " is found, the cached COMPLETED state will be used instead of executing" " the task run." ), ) cache_expiration: Optional[DateTime] = Field( default=None, description="Specifies when the cached state should expire." ) task_version: Optional[str] = Field( default=None, description="The version of the task being run." ) empirical_policy: TaskRunPolicy = Field( default_factory=TaskRunPolicy, ) tags: list[str] = Field( default_factory=list, description="A list of tags for the task run.", examples=[["tag-1", "tag-2"]], ) labels: KeyValueLabelsField = Field(default_factory=dict) state_id: Optional[UUID] = Field( default=None, description="The id of the current task run state." ) task_inputs: dict[ str, list[Union[TaskRunResult, FlowRunResult, Parameter, Constant]] ] = Field( default_factory=dict, description=( "Tracks the source of inputs to a task run. Used for internal bookkeeping. " "Note the special __parents__ key, used to indicate a parent/child " "relationship that may or may not include an input or wait_for semantic." ), ) state_type: Optional[StateType] = Field( default=None, description="The type of the current task run state." ) state_name: Optional[str] = Field( default=None, description="The name of the current task run state." ) run_count: int = Field( default=0, description="The number of times the task run has been executed." 
) flow_run_run_count: int = Field( default=0, description=( "If the parent flow has retried, this indicates the flow retry this run is" " associated with." ), ) expected_start_time: Optional[DateTime] = Field( default=None, description="The task run's expected start time.", ) # the next scheduled start time will be populated # whenever the run is in a scheduled state next_scheduled_start_time: Optional[DateTime] = Field( default=None, description="The next time the task run is scheduled to start.", ) start_time: Optional[DateTime] = Field( default=None, description="The actual start time." ) end_time: Optional[DateTime] = Field( default=None, description="The actual end time." ) total_run_time: datetime.timedelta = Field( default=datetime.timedelta(0), description=( "Total run time. If the task run was executed multiple times, the time of" " each run will be summed." ), ) estimated_run_time: datetime.timedelta = Field( default=datetime.timedelta(0), description="A real-time estimate of total run time.", ) estimated_start_time_delta: datetime.timedelta = Field( default=datetime.timedelta(0), description="The difference between actual and expected start time.", ) state: Optional[State] = Field( default=None, description="The state of the task run.", examples=["State(type=StateType.COMPLETED)"], ) @field_validator("name", mode="before") @classmethod def set_default_name(cls, name: Optional[str]) -> Name: return get_or_create_run_name(name)
TaskRun
python
astropy__astropy
astropy/table/tests/test_table.py
{ "start": 69260, "end": 71619 }
class ____(SetupData): def test_fail_replace_column(self, table_types): """Raise exception when trying to replace column via table.columns object""" self._setup(table_types) t = table_types.Table([self.a, self.b]) with pytest.raises( ValueError, match=r"Cannot replace column 'a'. Use Table.replace_column.. instead.", ): t.columns["a"] = [1, 2, 3] with pytest.raises( ValueError, match=r"column name not there is not in the table" ): t.replace_column("not there", [1, 2, 3]) with pytest.raises( ValueError, match=r"length of new column must match table length" ): t.replace_column("a", [1, 2]) def test_replace_column(self, table_types): """Replace existing column with a new column""" self._setup(table_types) t = table_types.Table([self.a, self.b]) ta = t["a"] tb = t["b"] vals = [1.2, 3.4, 5.6] for col in ( vals, table_types.Column(vals), table_types.Column(vals, name="a"), table_types.Column(vals, name="b"), ): t.replace_column("a", col) assert np.all(t["a"] == vals) assert t["a"] is not ta # New a column assert t["b"] is tb # Original b column unchanged assert t.colnames == ["a", "b"] assert t["a"].meta == {} assert t["a"].format is None # Special case: replacing the only column can resize table del t["b"] assert len(t) == 3 t["a"] = [1, 2] assert len(t) == 2 def test_replace_index_column(self, table_types): """Replace index column and generate expected exception""" self._setup(table_types) t = table_types.Table([self.a, self.b]) t.add_index("a") with pytest.raises(ValueError) as err: t.replace_column("a", [1, 2, 3]) assert err.value.args[0] == "cannot replace a table index column" def test_replace_column_no_copy(self): t = Table([[1, 2], [3, 4]], names=["a", "b"]) a = np.array([1.5, 2.5]) t.replace_column("a", a, copy=False) assert t["a"][0] == a[0] t["a"][0] = 10 assert t["a"][0] == a[0]
TestReplaceColumn
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 46463, "end": 46657 }
class ____(BaseModel, extra="forbid"): """ ID-based filtering condition """ has_id: List["ExtendedPointId"] = Field(..., description="ID-based filtering condition")
HasIdCondition
python
getsentry__sentry
src/sentry/integrations/slack/webhooks/base.py
{ "start": 1572, "end": 4726 }
class ____(Endpoint, abc.ABC):
    """Abstract base endpoint for Slack DM / slash-command requests.

    Subclasses provide the transport-specific ``reply`` and the team
    link/unlink handlers; this base implements dispatch and user linking.
    """

    slack_request_class = SlackDMRequest

    def post_dispatcher(self, request: SlackDMRequest) -> Response:
        """
        All Slack commands are handled by this endpoint. This block just
        validates the request and dispatches it to the right handler.
        """
        cmd_input = request.get_command_input()

        try:
            return SlackCommandDispatcher(self, request).dispatch(cmd_input)
        except CommandNotMatchedError:
            # If we cannot interpret the command, print help text.
            request_data = request.data
            unknown_command = request_data.get("text", "").lower()
            return self.help(slack_request=request, command=unknown_command)

    def reply(self, slack_request: SlackDMRequest, message: str) -> Response:
        # Transport-specific; implemented by concrete subclasses.
        raise NotImplementedError

    def help(self, slack_request: SlackDMRequest, command: str) -> Response:
        # Build the generic help message, scoped to the integration when known.
        return self.respond(
            SlackHelpMessageBuilder(
                command=command,
                integration_id=slack_request.integration.id if slack_request.integration else None,
            ).build()
        )

    def link_user(self, slack_request: SlackDMRequest) -> Response:
        # Imported lazily; presumably to avoid an import cycle — TODO confirm.
        from sentry.integrations.slack.views.link_identity import build_linking_url

        if slack_request.has_identity:
            # Already linked: report the attached identity instead of re-linking.
            return self.reply(
                slack_request, ALREADY_LINKED_MESSAGE.format(username=slack_request.identity_str)
            )

        # All three fields are required to build the linking URL.
        if not (slack_request.integration and slack_request.user_id and slack_request.channel_id):
            logger.error(".link-user.bad_request.error", extra={"slack_request": slack_request})
            raise SlackRequestError(status=status.HTTP_400_BAD_REQUEST)

        associate_url = build_linking_url(
            integration=slack_request.integration,
            slack_id=slack_request.user_id,
            channel_id=slack_request.channel_id,
            response_url=slack_request.response_url,
        )
        return self.reply(slack_request, LINK_USER_MESSAGE.format(associate_url=associate_url))

    def unlink_user(self, slack_request: SlackDMRequest) -> Response:
        from sentry.integrations.slack.views.unlink_identity import build_unlinking_url

        if not slack_request.has_identity:
            return self.reply(slack_request, NOT_LINKED_MESSAGE)

        # NOTE(review): unlike link_user, this branch raises without logging —
        # confirm whether the omission is intentional.
        if not (slack_request.integration and slack_request.user_id and slack_request.channel_id):
            raise SlackRequestError(status=status.HTTP_400_BAD_REQUEST)

        associate_url = build_unlinking_url(
            integration_id=slack_request.integration.id,
            slack_id=slack_request.user_id,
            channel_id=slack_request.channel_id,
            response_url=slack_request.response_url,
        )
        return self.reply(slack_request, UNLINK_USER_MESSAGE.format(associate_url=associate_url))

    def link_team(self, slack_request: SlackDMRequest) -> Response:
        raise NotImplementedError

    def unlink_team(self, slack_request: SlackDMRequest) -> Response:
        raise NotImplementedError


@dataclass(frozen=True)
SlackDMEndpoint
python
pypa__warehouse
tests/common/db/banners.py
{ "start": 137, "end": 419 }
class ____(WarehouseFactory):
    """Factory producing Banner rows populated with Faker test data."""

    class Meta:
        model = Banner

    # Faker providers supply plausible values for each column.
    name = factory.Faker("word")
    text = factory.Faker("sentence")
    link_url = factory.Faker("uri")
    link_label = factory.Faker("word")
    # Active by default, with `end` in the future so the banner counts as
    # live for the duration of a test run.
    active = True
    end = factory.Faker("future_date")
BannerFactory
python
plotly__plotly.py
plotly/graph_objs/histogram/_marker.py
{ "start": 233, "end": 25231 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "histogram" _path_str = "histogram.marker" _valid_props = { "autocolorscale", "cauto", "cmax", "cmid", "cmin", "color", "coloraxis", "colorbar", "colorscale", "colorsrc", "cornerradius", "line", "opacity", "opacitysrc", "pattern", "reversescale", "showscale", } @property def autocolorscale(self): """ Determines whether the colorscale is a default palette (`autocolorscale: true`) or the palette determined by `marker.colorscale`. Has an effect only if in `marker.color` is set to a numerical array. In case `colorscale` is unspecified or `autocolorscale` is true, the default palette will be chosen according to whether numbers in the `color` array are all positive, all negative or mixed. The 'autocolorscale' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["autocolorscale"] @autocolorscale.setter def autocolorscale(self, val): self["autocolorscale"] = val @property def cauto(self): """ Determines whether or not the color domain is computed with respect to the input data (here in `marker.color`) or the bounds set in `marker.cmin` and `marker.cmax` Has an effect only if in `marker.color` is set to a numerical array. Defaults to `false` when `marker.cmin` and `marker.cmax` are set by the user. The 'cauto' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["cauto"] @cauto.setter def cauto(self, val): self["cauto"] = val @property def cmax(self): """ Sets the upper bound of the color domain. Has an effect only if in `marker.color` is set to a numerical array. Value should have the same units as in `marker.color` and if set, `marker.cmin` must be set as well. 
The 'cmax' property is a number and may be specified as: - An int or float Returns ------- int|float """ return self["cmax"] @cmax.setter def cmax(self, val): self["cmax"] = val @property def cmid(self): """ Sets the mid-point of the color domain by scaling `marker.cmin` and/or `marker.cmax` to be equidistant to this point. Has an effect only if in `marker.color` is set to a numerical array. Value should have the same units as in `marker.color`. Has no effect when `marker.cauto` is `false`. The 'cmid' property is a number and may be specified as: - An int or float Returns ------- int|float """ return self["cmid"] @cmid.setter def cmid(self, val): self["cmid"] = val @property def cmin(self): """ Sets the lower bound of the color domain. Has an effect only if in `marker.color` is set to a numerical array. Value should have the same units as in `marker.color` and if set, `marker.cmax` must be set as well. The 'cmin' property is a number and may be specified as: - An int or float Returns ------- int|float """ return self["cmin"] @cmin.setter def cmin(self, val): self["cmin"] = val @property def color(self): """ Sets the marker color. It accepts either a specific color or an array of numbers that are mapped to the colorscale relative to the max and min values of the array or relative to `marker.cmin` and `marker.cmax` if set. The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list - A number that will be interpreted as a color according to histogram.marker.colorscale - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def coloraxis(self): """ Sets a reference to a shared color axis. 
References to these shared color axes are "coloraxis", "coloraxis2", "coloraxis3", etc. Settings for these shared color axes are set in the layout, under `layout.coloraxis`, `layout.coloraxis2`, etc. Note that multiple color scales can be linked to the same color axis. The 'coloraxis' property is an identifier of a particular subplot, of type 'coloraxis', that may be specified as the string 'coloraxis' optionally followed by an integer >= 1 (e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.) Returns ------- str """ return self["coloraxis"] @coloraxis.setter def coloraxis(self, val): self["coloraxis"] = val @property def colorbar(self): """ The 'colorbar' property is an instance of ColorBar that may be specified as: - An instance of :class:`plotly.graph_objs.histogram.marker.ColorBar` - A dict of string/value properties that will be passed to the ColorBar constructor Returns ------- plotly.graph_objs.histogram.marker.ColorBar """ return self["colorbar"] @colorbar.setter def colorbar(self, val): self["colorbar"] = val @property def colorscale(self): """ Sets the colorscale. Has an effect only if in `marker.color` is set to a numerical array. The colorscale must be an array containing arrays mapping a normalized value to an rgb, rgba, hex, hsl, hsv, or named color string. At minimum, a mapping for the lowest (0) and highest (1) values are required. For example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the bounds of the colorscale in color space, use `marker.cmin` and `marker.cmax`. Alternatively, `colorscale` may be a palette name string of the following list: Blackbody,B luered,Blues,Cividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic ,Portland,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd. The 'colorscale' property is a colorscale and may be specified as: - A list of colors that will be spaced evenly to create the colorscale. 
Many predefined colorscale lists are included in the sequential, diverging, and cyclical modules in the plotly.colors package. - A list of 2-element lists where the first element is the normalized color level value (starting at 0 and ending at 1), and the second item is a valid color string. (e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']]) - One of the following named colorscales: ['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance', 'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg', 'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl', 'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric', 'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys', 'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet', 'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges', 'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl', 'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn', 'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu', 'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar', 'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn', 'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid', 'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd']. Appending '_r' to a named colorscale reverses it. Returns ------- str """ return self["colorscale"] @colorscale.setter def colorscale(self, val): self["colorscale"] = val @property def colorsrc(self): """ Sets the source reference on Chart Studio Cloud for `color`. The 'colorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["colorsrc"] @colorsrc.setter def colorsrc(self, val): self["colorsrc"] = val @property def cornerradius(self): """ Sets the rounding of corners. May be an integer number of pixels, or a percentage of bar width (as a string ending in %). Defaults to `layout.barcornerradius`. 
In stack or relative barmode, the first trace to set cornerradius is used for the whole stack. The 'cornerradius' property accepts values of any type Returns ------- Any """ return self["cornerradius"] @cornerradius.setter def cornerradius(self, val): self["cornerradius"] = val @property def line(self): """ The 'line' property is an instance of Line that may be specified as: - An instance of :class:`plotly.graph_objs.histogram.marker.Line` - A dict of string/value properties that will be passed to the Line constructor Returns ------- plotly.graph_objs.histogram.marker.Line """ return self["line"] @line.setter def line(self, val): self["line"] = val @property def opacity(self): """ Sets the opacity of the bars. The 'opacity' property is a number and may be specified as: - An int or float in the interval [0, 1] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|float|numpy.ndarray """ return self["opacity"] @opacity.setter def opacity(self, val): self["opacity"] = val @property def opacitysrc(self): """ Sets the source reference on Chart Studio Cloud for `opacity`. The 'opacitysrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["opacitysrc"] @opacitysrc.setter def opacitysrc(self, val): self["opacitysrc"] = val @property def pattern(self): """ Sets the pattern within the marker. The 'pattern' property is an instance of Pattern that may be specified as: - An instance of :class:`plotly.graph_objs.histogram.marker.Pattern` - A dict of string/value properties that will be passed to the Pattern constructor Returns ------- plotly.graph_objs.histogram.marker.Pattern """ return self["pattern"] @pattern.setter def pattern(self, val): self["pattern"] = val @property def reversescale(self): """ Reverses the color mapping if true. Has an effect only if in `marker.color` is set to a numerical array. 
If true, `marker.cmin` will correspond to the last color in the array and `marker.cmax` will correspond to the first color. The 'reversescale' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["reversescale"] @reversescale.setter def reversescale(self, val): self["reversescale"] = val @property def showscale(self): """ Determines whether or not a colorbar is displayed for this trace. Has an effect only if in `marker.color` is set to a numerical array. The 'showscale' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["showscale"] @showscale.setter def showscale(self, val): self["showscale"] = val @property def _prop_descriptions(self): return """\ autocolorscale Determines whether the colorscale is a default palette (`autocolorscale: true`) or the palette determined by `marker.colorscale`. Has an effect only if in `marker.color` is set to a numerical array. In case `colorscale` is unspecified or `autocolorscale` is true, the default palette will be chosen according to whether numbers in the `color` array are all positive, all negative or mixed. cauto Determines whether or not the color domain is computed with respect to the input data (here in `marker.color`) or the bounds set in `marker.cmin` and `marker.cmax` Has an effect only if in `marker.color` is set to a numerical array. Defaults to `false` when `marker.cmin` and `marker.cmax` are set by the user. cmax Sets the upper bound of the color domain. Has an effect only if in `marker.color` is set to a numerical array. Value should have the same units as in `marker.color` and if set, `marker.cmin` must be set as well. cmid Sets the mid-point of the color domain by scaling `marker.cmin` and/or `marker.cmax` to be equidistant to this point. Has an effect only if in `marker.color` is set to a numerical array. Value should have the same units as in `marker.color`. Has no effect when `marker.cauto` is `false`. 
cmin Sets the lower bound of the color domain. Has an effect only if in `marker.color` is set to a numerical array. Value should have the same units as in `marker.color` and if set, `marker.cmax` must be set as well. color Sets the marker color. It accepts either a specific color or an array of numbers that are mapped to the colorscale relative to the max and min values of the array or relative to `marker.cmin` and `marker.cmax` if set. coloraxis Sets a reference to a shared color axis. References to these shared color axes are "coloraxis", "coloraxis2", "coloraxis3", etc. Settings for these shared color axes are set in the layout, under `layout.coloraxis`, `layout.coloraxis2`, etc. Note that multiple color scales can be linked to the same color axis. colorbar :class:`plotly.graph_objects.histogram.marker.ColorBar` instance or dict with compatible properties colorscale Sets the colorscale. Has an effect only if in `marker.color` is set to a numerical array. The colorscale must be an array containing arrays mapping a normalized value to an rgb, rgba, hex, hsl, hsv, or named color string. At minimum, a mapping for the lowest (0) and highest (1) values are required. For example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the bounds of the colorscale in color space, use `marker.cmin` and `marker.cmax`. Alternatively, `colorscale` may be a palette name string of the following list: Blackbody,Bluered,Blues,Cividis,Earth,E lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd Bu,Reds,Viridis,YlGnBu,YlOrRd. colorsrc Sets the source reference on Chart Studio Cloud for `color`. cornerradius Sets the rounding of corners. May be an integer number of pixels, or a percentage of bar width (as a string ending in %). Defaults to `layout.barcornerradius`. In stack or relative barmode, the first trace to set cornerradius is used for the whole stack. 
line :class:`plotly.graph_objects.histogram.marker.Line` instance or dict with compatible properties opacity Sets the opacity of the bars. opacitysrc Sets the source reference on Chart Studio Cloud for `opacity`. pattern Sets the pattern within the marker. reversescale Reverses the color mapping if true. Has an effect only if in `marker.color` is set to a numerical array. If true, `marker.cmin` will correspond to the last color in the array and `marker.cmax` will correspond to the first color. showscale Determines whether or not a colorbar is displayed for this trace. Has an effect only if in `marker.color` is set to a numerical array. """ def __init__( self, arg=None, autocolorscale=None, cauto=None, cmax=None, cmid=None, cmin=None, color=None, coloraxis=None, colorbar=None, colorscale=None, colorsrc=None, cornerradius=None, line=None, opacity=None, opacitysrc=None, pattern=None, reversescale=None, showscale=None, **kwargs, ): """ Construct a new Marker object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.histogram.Marker` autocolorscale Determines whether the colorscale is a default palette (`autocolorscale: true`) or the palette determined by `marker.colorscale`. Has an effect only if in `marker.color` is set to a numerical array. In case `colorscale` is unspecified or `autocolorscale` is true, the default palette will be chosen according to whether numbers in the `color` array are all positive, all negative or mixed. cauto Determines whether or not the color domain is computed with respect to the input data (here in `marker.color`) or the bounds set in `marker.cmin` and `marker.cmax` Has an effect only if in `marker.color` is set to a numerical array. Defaults to `false` when `marker.cmin` and `marker.cmax` are set by the user. cmax Sets the upper bound of the color domain. Has an effect only if in `marker.color` is set to a numerical array. 
Value should have the same units as in `marker.color` and if set, `marker.cmin` must be set as well. cmid Sets the mid-point of the color domain by scaling `marker.cmin` and/or `marker.cmax` to be equidistant to this point. Has an effect only if in `marker.color` is set to a numerical array. Value should have the same units as in `marker.color`. Has no effect when `marker.cauto` is `false`. cmin Sets the lower bound of the color domain. Has an effect only if in `marker.color` is set to a numerical array. Value should have the same units as in `marker.color` and if set, `marker.cmax` must be set as well. color Sets the marker color. It accepts either a specific color or an array of numbers that are mapped to the colorscale relative to the max and min values of the array or relative to `marker.cmin` and `marker.cmax` if set. coloraxis Sets a reference to a shared color axis. References to these shared color axes are "coloraxis", "coloraxis2", "coloraxis3", etc. Settings for these shared color axes are set in the layout, under `layout.coloraxis`, `layout.coloraxis2`, etc. Note that multiple color scales can be linked to the same color axis. colorbar :class:`plotly.graph_objects.histogram.marker.ColorBar` instance or dict with compatible properties colorscale Sets the colorscale. Has an effect only if in `marker.color` is set to a numerical array. The colorscale must be an array containing arrays mapping a normalized value to an rgb, rgba, hex, hsl, hsv, or named color string. At minimum, a mapping for the lowest (0) and highest (1) values are required. For example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the bounds of the colorscale in color space, use `marker.cmin` and `marker.cmax`. Alternatively, `colorscale` may be a palette name string of the following list: Blackbody,Bluered,Blues,Cividis,Earth,E lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd Bu,Reds,Viridis,YlGnBu,YlOrRd. 
colorsrc Sets the source reference on Chart Studio Cloud for `color`. cornerradius Sets the rounding of corners. May be an integer number of pixels, or a percentage of bar width (as a string ending in %). Defaults to `layout.barcornerradius`. In stack or relative barmode, the first trace to set cornerradius is used for the whole stack. line :class:`plotly.graph_objects.histogram.marker.Line` instance or dict with compatible properties opacity Sets the opacity of the bars. opacitysrc Sets the source reference on Chart Studio Cloud for `opacity`. pattern Sets the pattern within the marker. reversescale Reverses the color mapping if true. Has an effect only if in `marker.color` is set to a numerical array. If true, `marker.cmin` will correspond to the last color in the array and `marker.cmax` will correspond to the first color. showscale Determines whether or not a colorbar is displayed for this trace. Has an effect only if in `marker.color` is set to a numerical array. Returns ------- Marker """ super().__init__("marker") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.histogram.Marker constructor must be a dict or an instance of :class:`plotly.graph_objs.histogram.Marker`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("autocolorscale", arg, autocolorscale) self._set_property("cauto", arg, cauto) self._set_property("cmax", arg, cmax) self._set_property("cmid", arg, cmid) self._set_property("cmin", arg, cmin) self._set_property("color", arg, color) self._set_property("coloraxis", arg, coloraxis) self._set_property("colorbar", arg, colorbar) self._set_property("colorscale", arg, colorscale) self._set_property("colorsrc", arg, colorsrc) 
self._set_property("cornerradius", arg, cornerradius) self._set_property("line", arg, line) self._set_property("opacity", arg, opacity) self._set_property("opacitysrc", arg, opacitysrc) self._set_property("pattern", arg, pattern) self._set_property("reversescale", arg, reversescale) self._set_property("showscale", arg, showscale) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Marker
python
kamyu104__LeetCode-Solutions
Python/minimum-size-subarray-sum.py
{ "start": 565, "end": 1566 }
class ____(object): # @param {integer} s # @param {integer[]} nums # @return {integer} def minSubArrayLen(self, s, nums): min_size = float("inf") sum_from_start = [n for n in nums] for i in xrange(len(sum_from_start) - 1): sum_from_start[i + 1] += sum_from_start[i] for i in xrange(len(sum_from_start)): end = self.binarySearch(lambda x, y: x <= y, sum_from_start, \ i, len(sum_from_start), \ sum_from_start[i] - nums[i] + s) if end < len(sum_from_start): min_size = min(min_size, end - i + 1) return min_size if min_size != float("inf") else 0 def binarySearch(self, compare, A, start, end, target): while start < end: mid = start + (end - start) / 2 if compare(target, A[mid]): end = mid else: start = mid + 1 return start
Solution2
python
coleifer__peewee
peewee.py
{ "start": 144983, "end": 145763 }
class ____(object): def __init__(self, db): self.db = db def __call__(self, fn): @wraps(fn) def inner(*args, **kwargs): with _manual(self.db): return fn(*args, **kwargs) return inner def __enter__(self): top = self.db.top_transaction() if top is not None and not isinstance(top, _manual): raise ValueError('Cannot enter manual commit block while a ' 'transaction is active.') self.db.push_transaction(self) def __exit__(self, exc_type, exc_val, exc_tb): if self.db.pop_transaction() is not self: raise ValueError('Transaction stack corrupted while exiting ' 'manual commit block.')
_manual
python
ray-project__ray
python/ray/serve/_private/logging_utils.py
{ "start": 18350, "end": 19041 }
class ____: """ Context manager to manage logging behaviors within a particular block, such as: 1) Overriding logging level Source (python3 official documentation) https://docs.python.org/3/howto/logging-cookbook.html#using-a-context-manager-for-selective-logging # noqa: E501 """ def __init__(self, logger, level=None): self.logger = logger self.level = level def __enter__(self): if self.level is not None: self.old_level = self.logger.level self.logger.setLevel(self.level) def __exit__(self, et, ev, tb): if self.level is not None: self.logger.setLevel(self.old_level)
LoggingContext
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/pipelines/pipeline.py
{ "start": 20807, "end": 21059 }
class ____(graphene.ObjectType):
    """GraphQL type representing one page of run events plus pagination state."""

    class Meta:
        name = "EventConnection"

    # The events in this page.
    events = non_null_list(GrapheneDagsterRunEvent)
    # Opaque cursor to request the next page.
    cursor = graphene.NonNull(graphene.String)
    # True when more events exist beyond this page.
    hasMore = graphene.NonNull(graphene.Boolean)
GrapheneEventConnection
python
django__django
tests/admin_widgets/test_autocomplete_widget.py
{ "start": 311, "end": 769 }
class ____(forms.ModelForm):
    """ModelForm wiring Album's relation fields to admin autocomplete widgets."""

    class Meta:
        model = Album
        fields = ["band", "featuring"]
        widgets = {
            # One widget with custom HTML attrs, one with defaults, so tests
            # can cover both rendering paths.
            "band": AutocompleteSelect(
                Album._meta.get_field("band"),
                admin.site,
                attrs={"class": "my-class"},
            ),
            "featuring": AutocompleteSelect(
                Album._meta.get_field("featuring"),
                admin.site,
            ),
        }
AlbumForm
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/shortcuts/progress_bar/formatters.py
{ "start": 7564, "end": 8458 }
class ____(Formatter):
    """
    Display the time left.
    """

    template = HTML("<time-left>{time_left}</time-left>")
    unknown = "?:??:??"

    def format(
        self,
        progress_bar: ProgressBar,
        progress: ProgressBarCounter[object],
        width: int,
    ) -> AnyFormattedText:
        # Fall back to the "unknown" placeholder when no estimate exists.
        remaining = progress.time_left
        text = self.unknown if remaining is None else _format_timedelta(remaining)
        return self.template.format(time_left=text.rjust(width))

    def get_width(self, progress_bar: ProgressBar) -> AnyDimension:
        # Widest rendering across counters; 7 == len(self.unknown) for
        # counters without an estimate.  Zero when there are no counters.
        widths = [
            7 if c.time_left is None else len(_format_timedelta(c.time_left))
            for c in progress_bar.counters
        ]
        return max(widths) if widths else 0
TimeLeft
python
pyinstaller__pyinstaller
tests/unit/test_modulegraph/test_basic.py
{ "start": 204, "end": 1312 }
class ____(unittest.TestCase):
    """Tests for ModuleGraph._find_all_submodules with odd packagepath values."""

    def testNone(self):
        mg = modulegraph.ModuleGraph()

        # empty packagepath
        m = DummyModule(None)
        sub_ms = []
        for sm in mg._find_all_submodules(m):
            sub_ms.append(sm)
        self.assertEqual(sub_ms, [])

    def testSimple(self):
        mg = modulegraph.ModuleGraph()

        # a string does not break anything although it is split into its characters
        # BUG: "/hi/there" will read "/"
        m = DummyModule("xyz")
        sub_ms = []
        for sm in mg._find_all_submodules(m):
            sub_ms.append(sm)
        self.assertEqual(sub_ms, [])

    def testSlashes(self):
        # a string does not break anything although it is split into its characters
        # BUG: "/xyz" will read "/" so this one already triggers missing itertools
        mg = modulegraph.ModuleGraph()

        m = DummyModule("/xyz")
        sub_ms = []
        for sm in mg._find_all_submodules(m):
            sub_ms.append(sm)
        self.assertEqual(sub_ms, [])


if __name__ == '__main__':
    unittest.main()
FindAllSubmodulesTestCase
python
dagster-io__dagster
python_modules/libraries/dagster-shared/dagster_shared_tests/test_record.py
{ "start": 8596, "end": 8654 }
class ____:
    # Simple annotated record: two required fields, no methods.
    name: str
    age: int


@record_custom
Person
python
getsentry__sentry
tests/sentry/integrations/api/endpoints/test_organization_repository_commits.py
{ "start": 170, "end": 1028 }
class ____(APITestCase):
    """Endpoint test: listing the commits of a single repository."""

    def test_simple(self) -> None:
        self.login_as(user=self.user)

        org = self.create_organization(owner=self.user, name="baz")
        # Target repository with one commit...
        repo = Repository.objects.create(name="example", organization_id=org.id)
        commit = Commit.objects.create(repository_id=repo.id, organization_id=org.id, key="a" * 40)

        # ...and a sibling repository whose commit must NOT appear in the response.
        repo2 = Repository.objects.create(name="example2", organization_id=org.id)
        Commit.objects.create(repository_id=repo2.id, organization_id=org.id, key="b" * 40)

        url = reverse("sentry-api-0-organization-repository-commits", args=[org.slug, repo.id])
        response = self.client.get(url, format="json")

        assert response.status_code == 200, response.content
        assert len(response.data) == 1
        assert response.data[0]["id"] == commit.key
OrganizationRepositoryCommitsTest
python
huggingface__transformers
src/transformers/models/speecht5/modeling_speecht5.py
{ "start": 56808, "end": 57999 }
class ____(SpeechT5PreTrainedModel):
    """
    Wrapper around SpeechT5Encoder that applies SpeechT5SpeechEncoderPrenet to convert
    the audio waveform data to hidden features.
    """

    def __init__(self, config: SpeechT5Config):
        super().__init__(config)
        self.prenet = SpeechT5SpeechEncoderPrenet(config)
        self.wrapped_encoder = SpeechT5Encoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_values: torch.FloatTensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutput]:
        # The prenet returns a (possibly transformed) attention mask; the
        # returned mask — not the caller's original — is what the encoder
        # receives, presumably because the prenet changes sequence length —
        # TODO confirm against SpeechT5SpeechEncoderPrenet.
        hidden_states, attention_mask = self.prenet(input_values, attention_mask)

        outputs = self.wrapped_encoder(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        return outputs
SpeechT5EncoderWithSpeechPrenet
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/constructor6.py
{ "start": 1486, "end": 1772 }
class ____(Model):
    # Minimal Model subclass used to exercise ForeignKey constructor overloads.
    pass


# `null` selects between the ForeignKey[T] and ForeignKey[T | None] overloads.
reveal_type(ForeignKey(Author, null=False), expected_text="ForeignKey[Author]")
reveal_type(ForeignKey(Author, null=True), expected_text="ForeignKey[Author | None]")

_T3 = TypeVar("_T3")
_T4 = TypeVar("_T4")

_S1 = TypeVar("_S1")
_S2 = TypeVar("_S2")
Author
python
huggingface__transformers
src/transformers/models/layoutxlm/tokenization_layoutxlm.py
{ "start": 8713, "end": 47283 }
class ____(TokenizersBackend): """ Construct a "fast" LayoutXLM tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models). This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab (`list[tuple[str, float]]`, *optional*): Vocabulary for the tokenizer as a list of (token, score) tuples. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. 
pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. cls_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [CLS] token. sep_token_box (`list[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`): The bounding box to use for the special [SEP] token. pad_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [PAD] token. pad_token_label (`int`, *optional*, defaults to -100): The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. only_label_first_subword (`bool`, *optional*, defaults to `True`): Whether or not to only label the first subword, in case word labels are provided. add_prefix_space (`bool`, *optional*, defaults to `True`): Whether or not to add an initial space to the input. additional_special_tokens (`list[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = None def __init__( self, vocab_file=None, vocab=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, add_prefix_space=True, **kwargs, ): # Mask token behave like a normal word, i.e. 
include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token self.add_prefix_space = add_prefix_space # Build vocab from list of tuples if provided, else use default # Handle both list of tuples (when creating) and dict (when loading) if vocab is not None: if isinstance(vocab, dict): # Convert dict to list of tuples self._vocab = [(token, score) for token, score in vocab.items()] else: self._vocab = vocab else: self._vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] if mask_token not in [v[0] for v in self._vocab]: self._vocab.append((str(mask_token), 0.0)) # Create the Unigram tokenizer self._tokenizer = Tokenizer(Unigram(self._vocab, unk_id=3, byte_fallback=False)) # Set up normalizer (strip right, replace multiple spaces) self._tokenizer.normalizer = normalizers.Sequence( [ normalizers.Strip(left=False, right=True), normalizers.Replace(Regex(" {2,}"), "▁"), ] ) # Set up pre_tokenizer (Metaspace) prepend_scheme = _get_prepend_scheme(add_prefix_space, self) self._tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement="▁", prepend_scheme=prepend_scheme) # Set up decoder self._tokenizer.decoder = decoders.Metaspace(replacement="▁", prepend_scheme=prepend_scheme) # Set up post_processor for XLM-RoBERTa style # Get token IDs cls_token_id = self._get_token_id(str(cls_token)) sep_token_id = self._get_token_id(str(sep_token)) self._tokenizer.post_processor = processors.TemplateProcessing( single="<s> $A </s>", pair="<s> $A </s> </s> $B </s>", special_tokens=[ ("<s>", cls_token_id), ("</s>", sep_token_id), ], ) tokenizer_object = self._tokenizer # additional properties self.cls_token_box = cls_token_box self.sep_token_box = sep_token_box self.pad_token_box = pad_token_box self.pad_token_label = pad_token_label self.only_label_first_subword = only_label_first_subword super().__init__( tokenizer_object=tokenizer_object, bos_token=bos_token, eos_token=eos_token, 
sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, vocab_file=vocab_file, vocab=vocab, add_prefix_space=add_prefix_space, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, **kwargs, ) self.vocab_file = vocab_file def _get_token_id(self, token: str) -> int: """Helper to get token ID from vocab.""" for i, (t, _) in enumerate(self._vocab): if t == token: return i return 3 # unk_id def encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[list[list[int]]] = None, word_labels: Optional[list[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Tokenize and prepare for the model a sequence or a pair of sequences. 
""" # Get the padding and truncation strategies padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def batch_encode_plus( self, batch_text_or_text_pairs: Union[ list[TextInput], list[TextInputPair], list[PreTokenizedInput], ], is_pair: Optional[bool] = None, boxes: Optional[list[list[list[int]]]] = None, word_labels: Optional[list[list[int]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Tokenize and prepare for the model a list of sequences or a list of pairs of sequences. 
""" # Get the padding and truncation strategies padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]] = None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]] = None, word_labels: Optional[Union[list[int], list[list[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Main method to tokenize and 
prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (`list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (`list[list[int]]`, `list[list[list[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (`list[int]`, `list[list[int]]`, *optional*): Word-level integer labels (for token classification tasks such as FUNSD, CORD). """ # Input type checking for clearer error def _is_valid_text_input(t): if isinstance(t, str): # Strings are fine return True elif isinstance(t, (list, tuple)): # List are fine as long as they are... if len(t) == 0: # ... empty return True elif isinstance(t[0], str): # ... list of strings return True elif isinstance(t[0], (list, tuple)): # ... list with an empty list or with a list of strings return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: # in case text + text_pair are provided, text = questions, text_pair = words if not _is_valid_text_input(text): raise ValueError("text input must of type `str` (single example) or `list[str]` (batch of examples). ") if not isinstance(text_pair, (list, tuple)): raise ValueError( "words must of type `list[str]` (single pretokenized example), " "or `list[list[str]]` (batch of pretokenized examples)." 
) else: # in case only text is provided => must be words if not isinstance(text, (list, tuple)): raise ValueError( "Words must of type `list[str]` (single pretokenized example), " "or `list[list[str]]` (batch of pretokenized examples)." ) if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) words = text if text_pair is None else text_pair if boxes is None: raise ValueError("You must provide corresponding bounding boxes") if is_batched: if len(words) != len(boxes): raise ValueError("You must provide words and boxes for an equal amount of examples") for words_example, boxes_example in zip(words, boxes): if len(words_example) != len(boxes_example): raise ValueError("You must provide as many words as there are bounding boxes") else: if len(words) != len(boxes): raise ValueError("You must provide as many words as there are bounding boxes") if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError( f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" f" {len(text_pair)}." 
) batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> list[str]: batched_input = [(text, pair)] if pair else [text] # Handle split_special_tokens parameter # If split_special_tokens=True, we want encode_special_tokens=True (split the special tokens) # If split_special_tokens=False, we want encode_special_tokens=False (keep special tokens whole) split_special_tokens = kwargs.pop("split_special_tokens", self.split_special_tokens) self._tokenizer.encode_special_tokens = split_special_tokens encodings = self._tokenizer.encode_batch( 
batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs ) return encodings[0].tokens def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ list[TextInput], list[TextInputPair], list[PreTokenizedInput], ], is_pair: Optional[bool] = None, boxes: Optional[list[list[list[int]]]] = None, word_labels: Optional[list[list[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if not isinstance(batch_text_or_text_pairs, list): raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})") # Set the truncation and padding strategy and restore the initial configuration self.set_truncation_and_padding( padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, ) if is_pair: batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs] encodings = self._tokenizer.encode_batch( batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=True, # we set this to True as LayoutLMv2 always expects pretokenized inputs ) # Convert encoding to dict # `Tokens` has type: tuple[ # list[dict[str, list[list[int]]]] or list[dict[str, 2D-Tensor]], # list[EncodingFast] # ] # with nested dimensions corresponding to batch, overflows, sequence length 
tokens_and_encodings = [ self._convert_encoding( encoding=encoding, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=True if word_labels is not None else return_offsets_mapping, # we use offsets to create the labels return_length=return_length, verbose=verbose, ) for encoding in encodings ] # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length) # (we say ~ because the number of overflow varies with the example in the batch) # # To match each overflowing sample with the original sample in the batch # we add an overflow_to_sample_mapping array (see below) sanitized_tokens = {} for key in tokens_and_encodings[0][0]: stack = [e for item, _ in tokens_and_encodings for e in item[key]] sanitized_tokens[key] = stack sanitized_encodings = [e for _, item in tokens_and_encodings for e in item] # If returning overflowing tokens, we need to return a mapping # from the batch idx to the original sample if return_overflowing_tokens: overflow_to_sample_mapping = [] for i, (toks, _) in enumerate(tokens_and_encodings): overflow_to_sample_mapping += [i] * len(toks["input_ids"]) sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping for input_ids in sanitized_tokens["input_ids"]: self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose) # create the token boxes token_boxes = [] for batch_index in range(len(sanitized_tokens["input_ids"])): if return_overflowing_tokens: original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index] else: original_index = batch_index token_boxes_example = [] for id, sequence_id, word_id in zip( sanitized_tokens["input_ids"][batch_index], sanitized_encodings[batch_index].sequence_ids, 
sanitized_encodings[batch_index].word_ids, ): if word_id is not None: if is_pair and sequence_id == 0: token_boxes_example.append(self.pad_token_box) else: token_boxes_example.append(boxes[original_index][word_id]) else: if id == self.cls_token_id: token_boxes_example.append(self.cls_token_box) elif id == self.sep_token_id: token_boxes_example.append(self.sep_token_box) elif id == self.pad_token_id: token_boxes_example.append(self.pad_token_box) else: raise ValueError("Id not recognized") token_boxes.append(token_boxes_example) sanitized_tokens["bbox"] = token_boxes # optionally, create the labels if word_labels is not None: labels = [] for batch_index in range(len(sanitized_tokens["input_ids"])): if return_overflowing_tokens: original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index] else: original_index = batch_index labels_example = [] for id, offset, word_id in zip( sanitized_tokens["input_ids"][batch_index], sanitized_tokens["offset_mapping"][batch_index], sanitized_encodings[batch_index].word_ids, ): if word_id is not None: if self.only_label_first_subword: if offset[0] == 0: # Use the real label id for the first token of the word, and padding ids for the remaining tokens labels_example.append(word_labels[original_index][word_id]) else: labels_example.append(self.pad_token_label) else: labels_example.append(word_labels[original_index][word_id]) else: labels_example.append(self.pad_token_label) labels.append(labels_example) sanitized_tokens["labels"] = labels # finally, remove offsets if the user didn't want them if not return_offsets_mapping: del sanitized_tokens["offset_mapping"] return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors) def _encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[list[list[int]]] = None, word_labels: Optional[list[int]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = 
PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: # make it a batched input # 2 options: # 1) only text, in case text must be a list of str # 2) text + text_pair, in which case text = str and text_pair a list of str batched_input = [(text, text_pair)] if text_pair else [text] batched_boxes = [boxes] batched_word_labels = [word_labels] if word_labels is not None else None batched_output = self._batch_encode_plus( batched_input, is_pair=bool(text_pair is not None), boxes=batched_boxes, word_labels=batched_word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) # Return tensor is None, then we can remove the leading batch axis # Overflowing tokens are returned as a batch of output so we keep them in this case if return_tensors is None and not return_overflowing_tokens: batched_output = BatchEncoding( { key: value[0] if len(value) > 0 and isinstance(value[0], list) else value for key, value in batched_output.items() }, batched_output.encodings, ) 
self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose) return batched_output def _pad( self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side (`str`, *optional*): The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. 
return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present. if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) padding_side = padding_side if padding_side is not None else self.padding_side if padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "bbox" in encoded_inputs: encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference if "labels" in encoded_inputs: encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * 
difference + encoded_inputs[ "token_type_ids" ] if "bbox" in encoded_inputs: encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"] if "labels" in encoded_inputs: encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError("Invalid padding strategy:" + str(padding_side)) return encoded_inputs def build_inputs_with_special_tokens( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def create_token_type_ids_from_sequences( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] __all__ = ["LayoutXLMTokenizer"]
LayoutXLMTokenizer
python
ethereum__web3.py
web3/exceptions.py
{ "start": 905, "end": 1070 }
class ____(Web3Exception, ValueError): """ A web3.py exception wrapper for `ValueError`, for better control over exception handling. """
Web3ValueError
python
huggingface__transformers
tests/models/seamless_m4t_v2/test_modeling_seamless_m4t_v2.py
{ "start": 25396, "end": 35836 }
class ____(unittest.TestCase): # test that non-standard generation works # test generation of: SeamlessM4Tv2Model, SeamlessM4Tv2ForSpeechToSpeech, SeamlessM4Tv2ForSpeechToText, SeamlessM4Tv2ForTextToSpeech def setUp(self): self.speech_model_tester = SeamlessM4Tv2ModelTester(self, input_modality="speech") self.text_model_tester = SeamlessM4Tv2ModelTester(self, input_modality="text") self.tmpdirname = tempfile.mkdtemp() def update_generation(self, model): text_lang_code_to_id = { "fra": 4, "eng": 4, "rus": 4, } speech_lang_code_to_id = { "fra": 4, "eng": 4, } id_to_text = {str(i): "a" for i in range(model.config.vocab_size)} id_to_text["0"] = "ab" id_to_text["1"] = "_b" id_to_text["3"] = "," id_to_text["4"] = "_cd" char_to_id = {char: i for (i, char) in enumerate("abcd")} generation_config = copy.deepcopy(model.generation_config) generation_config.__setattr__("text_decoder_lang_to_code_id", text_lang_code_to_id) generation_config.__setattr__("t2u_lang_code_to_id", speech_lang_code_to_id) generation_config.__setattr__("vocoder_lang_code_to_id", speech_lang_code_to_id) generation_config.__setattr__("id_to_text", id_to_text) generation_config.__setattr__("char_to_id", char_to_id) generation_config.__setattr__("eos_token_id", 0) generation_config._from_model_config = False model.generation_config = generation_config def prepare_text_input(self, tgt_lang): config, inputs, decoder_input_ids, input_mask, lm_labels = self.text_model_tester.prepare_config_and_inputs() input_dict = { "input_ids": inputs, "attention_mask": input_mask, "tgt_lang": tgt_lang, "num_beams": 2, "do_sample": True, } return config, input_dict def prepare_speech_input(self): config, inputs, decoder_input_ids, input_mask, lm_labels = self.speech_model_tester.prepare_config_and_inputs() input_dict = { "input_features": inputs, "attention_mask": input_mask, "tgt_lang": "fra", "num_beams": 2, "do_sample": True, } return config, input_dict def prepare_speech_and_text_input(self): config, inputs, 
decoder_input_ids, input_mask, lm_labels = self.speech_model_tester.prepare_config_and_inputs() input_speech = { "input_features": inputs, "attention_mask": input_mask, "tgt_lang": "fra", "num_beams": 2, "do_sample": True, } config, inputs, decoder_input_ids, input_mask, lm_labels = self.text_model_tester.prepare_config_and_inputs() input_text = { "input_ids": inputs, "attention_mask": input_mask, "tgt_lang": "eng", "num_beams": 2, "do_sample": True, } return config, input_speech, input_text def factory_generation_speech_test(self, model, inputs): set_seed(0) output = model.generate(**inputs) return output def test_generation_languages(self): config, input_text_rus = self.prepare_text_input(tgt_lang="rus") model = SeamlessM4Tv2Model(config=config) self.update_generation(model) model.to(torch_device) model.eval() # make sure that generating speech, with a language that is only supported for text translation, raises error with self.assertRaises(ValueError): model.generate(**input_text_rus) # make sure that generating text only works model.generate(**input_text_rus, generate_speech=False) # make sure it works for languages supported by both output modalities config, input_text_eng = self.prepare_text_input(tgt_lang="eng") model.generate(**input_text_eng) model.generate(**input_text_eng, generate_speech=False) def test_speech_generation(self): config, input_speech, input_text = self.prepare_speech_and_text_input() from transformers.testing_utils import set_config_for_less_flaky_test, set_model_for_less_flaky_test set_config_for_less_flaky_test(config) model = SeamlessM4Tv2Model(config=config) set_model_for_less_flaky_test(model) self.update_generation(model) model.save_pretrained(self.tmpdirname) model.to(torch_device) model.eval() output_original_text = self.factory_generation_speech_test(model, input_text) output_original_speech = self.factory_generation_speech_test(model, input_speech) state_dict = model.state_dict() text_model = 
SeamlessM4Tv2ForTextToSpeech.from_pretrained(self.tmpdirname) # Even if this component is loaded after `model.save_pretrained` which is after # `set_model_for_less_flaky_test(model)`, we still need to apply `set_model_for_less_flaky_test` here as the # `eps` attribute in the model's norm layers is not set from the config. set_model_for_less_flaky_test(text_model) self.update_generation(text_model) text_model.to(torch_device) text_model.eval() output_text = self.factory_generation_speech_test(model, input_text) speech_model = SeamlessM4Tv2ForSpeechToSpeech.from_pretrained(self.tmpdirname) # Even if this component is loaded after `model.save_pretrained` which is after # `set_model_for_less_flaky_test(model)`, we still need to apply `set_model_for_less_flaky_test` here as the # `eps` attribute in the model's norm layers is not set from the config. set_model_for_less_flaky_test(speech_model) self.update_generation(speech_model) speech_model.to(torch_device) speech_model.eval() for name, tensor in speech_model.state_dict().items(): right_tensor = state_dict.get(name) self.assertEqual(tensor.tolist(), right_tensor.tolist(), f"Tensor {name}") output_speech = self.factory_generation_speech_test(model, input_speech) # test same text output from input text self.assertListEqual(output_original_text[0].ravel().tolist(), output_text[0].ravel().tolist()) self.assertListEqual(output_original_text[1].ravel().tolist(), output_text[1].ravel().tolist()) # test same speech output from input text # assertTrue because super long list makes this hang in case of failure self.assertTrue( output_original_speech[0].ravel().tolist() == output_speech[0].ravel().tolist(), "Speech generated was different", ) self.assertTrue( output_original_speech[1].ravel().tolist() == output_speech[1].ravel().tolist(), "Speech generated was different", ) def test_text_generation(self): config, input_speech, input_text = self.prepare_speech_and_text_input() # to return speech input_speech["generate_speech"] = 
False input_text["generate_speech"] = False model = SeamlessM4Tv2Model(config=config) self.update_generation(model) model.save_pretrained(self.tmpdirname) model.to(torch_device) model.eval() output_original_text = self.factory_generation_speech_test(model, input_text) output_original_speech = self.factory_generation_speech_test(model, input_speech) # other models don't need it input_speech.pop("generate_speech") input_text.pop("generate_speech") state_dict = model.state_dict() text_model = SeamlessM4Tv2ForTextToText.from_pretrained(self.tmpdirname) self.update_generation(text_model) text_model.to(torch_device) text_model.eval() for name, tensor in text_model.state_dict().items(): right_tensor = state_dict.get(name) self.assertEqual(tensor.tolist(), right_tensor.tolist()) output_text = self.factory_generation_speech_test(text_model, input_text) speech_model = SeamlessM4Tv2ForSpeechToText.from_pretrained(self.tmpdirname) for name, tensor in speech_model.state_dict().items(): right_tensor = state_dict.get(name) self.assertEqual(tensor.tolist(), right_tensor.tolist(), f"Tensor {name}") self.update_generation(speech_model) speech_model.to(torch_device) speech_model.eval() output_speech = self.factory_generation_speech_test(speech_model, input_speech) # test same text output from input text self.assertListEqual(output_original_text[0].ravel().tolist(), output_text.ravel().tolist()) # test same speech output from input text self.assertListEqual(output_original_speech[0].ravel().tolist(), output_speech.ravel().tolist()) def test_generation(self): config, input_speech, input_text = self.prepare_speech_and_text_input() input_speech["num_beams"] = 3 input_speech["do_sample"] = True input_speech["temperature"] = 0.5 input_speech["num_return_sequences"] = 3 input_text["num_beams"] = 3 input_text["do_sample"] = True input_text["temperature"] = 0.5 input_text["num_return_sequences"] = 3 for model_class in [SeamlessM4Tv2ForSpeechToSpeech, SeamlessM4Tv2ForSpeechToText, 
SeamlessM4Tv2Model]: model = model_class(config=config) self.update_generation(model) model.to(torch_device) model.eval() output = model.generate(**input_speech) output = output[0] if isinstance(output, tuple) else output self.assertEqual(output.shape[0], 3 * input_speech["input_features"].shape[0]) for model_class in [SeamlessM4Tv2ForTextToSpeech, SeamlessM4Tv2ForTextToText, SeamlessM4Tv2Model]: model = model_class(config=config) self.update_generation(model) model.to(torch_device) model.eval() output = model.generate(**input_text) output = output[0] if isinstance(output, tuple) else output self.assertEqual(output.shape[0], 3 * input_text["input_ids"].shape[0]) @require_torch
SeamlessM4Tv2GenerationTest
python
tiangolo__fastapi
docs_src/generate_clients/tutorial001.py
{ "start": 159, "end": 519 }
class ____(BaseModel): message: str @app.post("/items/", response_model=ResponseMessage) async def create_item(item: Item): return {"message": "item received"} @app.get("/items/", response_model=List[Item]) async def get_items(): return [ {"name": "Plumbus", "price": 3}, {"name": "Portal Gun", "price": 9001}, ]
ResponseMessage
python
pytransitions__transitions
transitions/extensions/factory.py
{ "start": 3469, "end": 3658 }
class ____(GraphMachine, AsyncMachine): """A machine that supports asynchronous event/callback processing with Graphviz support.""" transition_cls = AsyncTransition
AsyncGraphMachine
python
streamlit__streamlit
lib/streamlit/elements/arrow.py
{ "start": 5673, "end": 9726 }
class ____: """DataframeSelectionSerde is used to serialize and deserialize the dataframe selection state.""" def deserialize(self, ui_value: str | None) -> DataframeState: empty_selection_state: DataframeState = { "selection": { "rows": [], "columns": [], "cells": [], }, } selection_state: DataframeState = ( empty_selection_state if ui_value is None else json.loads(ui_value) ) if "selection" not in selection_state: selection_state = empty_selection_state if "rows" not in selection_state["selection"]: selection_state["selection"]["rows"] = [] if "columns" not in selection_state["selection"]: selection_state["selection"]["columns"] = [] if "cells" not in selection_state["selection"]: selection_state["selection"]["cells"] = [] else: # Explicitly convert all cells to a tuple (from list). # This is necessary since there isn't a concept of tuples in JSON # The format that the data is transferred to the backend. selection_state["selection"]["cells"] = [ tuple(cell) # type: ignore for cell in selection_state["selection"]["cells"] ] return cast("DataframeState", AttributeDictionary(selection_state)) def serialize(self, state: DataframeState) -> str: return json.dumps(state) def parse_selection_mode( selection_mode: SelectionMode | Iterable[SelectionMode], ) -> set[ArrowProto.SelectionMode.ValueType]: """Parse and check the user provided selection modes.""" if isinstance(selection_mode, str): # Only a single selection mode was passed selection_mode_set = {selection_mode} else: # Multiple selection modes were passed selection_mode_set = set(selection_mode) if not selection_mode_set.issubset(_SELECTION_MODES): raise StreamlitAPIException( f"Invalid selection mode: {selection_mode}. " f"Valid options are: {_SELECTION_MODES}" ) if selection_mode_set.issuperset({"single-row", "multi-row"}): raise StreamlitAPIException( "Only one of `single-row` or `multi-row` can be selected as selection mode." 
) if selection_mode_set.issuperset({"single-column", "multi-column"}): raise StreamlitAPIException( "Only one of `single-column` or `multi-column` can be selected as selection mode." ) if selection_mode_set.issuperset({"single-cell", "multi-cell"}): raise StreamlitAPIException( "Only one of `single-cell` or `multi-cell` can be selected as selection mode." ) parsed_selection_modes = [] for mode in selection_mode_set: if mode == "single-row": parsed_selection_modes.append(ArrowProto.SelectionMode.SINGLE_ROW) elif mode == "multi-row": parsed_selection_modes.append(ArrowProto.SelectionMode.MULTI_ROW) elif mode == "single-column": parsed_selection_modes.append(ArrowProto.SelectionMode.SINGLE_COLUMN) elif mode == "multi-column": parsed_selection_modes.append(ArrowProto.SelectionMode.MULTI_COLUMN) elif mode == "single-cell": parsed_selection_modes.append(ArrowProto.SelectionMode.SINGLE_CELL) elif mode == "multi-cell": parsed_selection_modes.append(ArrowProto.SelectionMode.MULTI_CELL) return set(parsed_selection_modes) def parse_border_mode( border: bool | Literal["horizontal"], ) -> ArrowProto.BorderMode.ValueType: """Parse and check the user provided border mode.""" if isinstance(border, bool): return ArrowProto.BorderMode.ALL if border else ArrowProto.BorderMode.NONE if border == "horizontal": return ArrowProto.BorderMode.HORIZONTAL raise StreamlitValueError("border", ["True", "False", "'horizontal'"])
DataframeSelectionSerde
python
astropy__astropy
astropy/units/tests/test_quantity_non_ufuncs.py
{ "start": 38362, "end": 41702 }
class ____(InvariantUnitTestSetup): def setup_method(self): super().setup_method() self.q[1, 1] = np.nan def test_nanmax(self): self.check(np.nanmax) def test_nanmin(self): self.check(np.nanmin) def test_nanargmin(self): out = np.nanargmin(self.q) expected = np.nanargmin(self.q.value) assert out == expected def test_nanargmax(self): out = np.nanargmax(self.q) expected = np.nanargmax(self.q.value) assert out == expected def test_nanmean(self): self.check(np.nanmean) @pytest.mark.parametrize("axis", [None, 0, 1, -1]) def test_nanmedian(self, axis): self.check(np.nanmedian, axis=axis) def test_nanmedian_out(self): out = np.empty_like(self.q) o = np.nanmedian(self.q, out=out) assert o is out assert np.all(o == np.nanmedian(self.q)) def test_nansum(self): self.check(np.nansum) def test_nancumsum(self): self.check(np.nancumsum) def test_nanstd(self): self.check(np.nanstd) @pytest.mark.parametrize( "out_init", [ pytest.param(u.Quantity(-1, "m"), id="out with correct unit"), # this should work too: out.unit will be overridden pytest.param(u.Quantity(-1), id="out with a different unit"), ], ) def test_nanstd_out(self, out_init): out = out_init.copy() o = np.nanstd(self.q, out=out) assert o is out assert o == np.nanstd(self.q) # Also check array input, Quantity output. out = out_init.copy() o2 = np.nanstd(self.q.value, out=out) assert o2 is out assert o2.unit == u.dimensionless_unscaled assert o2 == np.nanstd(self.q.value) def test_nanvar(self): out = np.nanvar(self.q) expected = np.nanvar(self.q.value) * self.q.unit**2 assert np.all(out == expected) @pytest.mark.parametrize( "out_init", [ pytest.param(u.Quantity(-1, "m"), id="out with correct unit"), # this should work too: out.unit will be overridden pytest.param(u.Quantity(-1), id="out with a different unit"), ], ) def test_nanvar_out(self, out_init): out = out_init.copy() o = np.nanvar(self.q, out=out) assert o is out assert o == np.nanvar(self.q) # Also check array input, Quantity output. 
out = out_init.copy() o2 = np.nanvar(self.q.value, out=out) assert o2 is out assert o2.unit == u.dimensionless_unscaled assert o2 == np.nanvar(self.q.value) def test_nanprod(self): with pytest.raises(u.UnitsError): np.nanprod(self.q) def test_nancumprod(self): with pytest.raises(u.UnitsError): np.nancumprod(self.q) def test_nanquantile(self): self.check(np.nanquantile, 0.5) o = np.nanquantile(self.q, 50 * u.percent) expected = np.nanquantile(self.q.value, 0.5) * u.m assert np.all(o == expected) def test_nanpercentile(self): self.check(np.nanpercentile, 0.5) o = np.nanpercentile(self.q, 0.5 * u.one) expected = np.nanpercentile(self.q.value, 50) * u.m assert np.all(o == expected)
TestNanFunctions
python
pytorch__pytorch
torch/_dynamo/eval_frame.py
{ "start": 4995, "end": 11768 }
class ____: stance: str = "default" skip_guard_eval_unsafe: bool = False backend: Union[str, Callable[..., Any], None] = None _stance = DynamoStance() def _set_stance(stance: DynamoStance) -> DynamoStance: global _stance from torch._C._dynamo.eval_frame import get_eval_frame_callback callback = get_eval_frame_callback() if callback is not False and callback is not None: raise RuntimeError("attempted to set_stance in a torch.compile region") prior = _stance _stance = stance return prior _set_stance._dynamo_forbidden = True # type: ignore[attr-defined] _EXAMPLE_INPUTS: Optional[dict[str, list[Any]]] = None def get_example_inputs(key: str) -> list[Any]: global _EXAMPLE_INPUTS if _EXAMPLE_INPUTS is None: _EXAMPLE_INPUTS = {} if key not in _EXAMPLE_INPUTS: _EXAMPLE_INPUTS[key] = [] return _EXAMPLE_INPUTS[key] def _callback_from_stance(callback: DynamoCallback) -> DynamoCallback: if _stance.stance == "default": # force_backend if _stance.backend is not None and callback not in (False, None): callback = _create_wrapped_callback(get_compiler_fn(_stance.backend)) return callback elif _stance.stance == "eager_then_compile": if callback not in (False, None): return _create_delayed_compile_callback(callback, _stance.stance) return callback elif _stance.stance == "aot_eager_then_compile": if callback not in (False, None): return _create_delayed_compile_callback(callback, _stance.stance) return callback elif _stance.stance == "force_eager": # disable return None elif _stance.stance == "eager_on_recompile": # run mode return False elif _stance.stance == "fail_on_recompile": if callback in (False, None): return callback def fail_callback( frame: DynamoFrameType, *args: Any, **kwargs: Any ) -> ConvertFrameReturn: if trace_rules.check(frame.f_code): return ConvertFrameReturn() if not convert_frame.has_tensor_in_frame(frame): return ConvertFrameReturn() from torch._C._dynamo.eval_frame import ( _debug_get_cache_entry_list, _debug_get_precompile_entries, ) from torch._dynamo.guards 
import get_and_maybe_log_recompilation_reasons message = ( "Detected recompile when torch.compile stance is 'fail_on_recompile'. " + f"filename: '{frame.f_code.co_filename}', " + f"function name: '{frame.f_code.co_name}', " + f"line number: {frame.f_lineno}" ) cache_entries = _debug_get_cache_entry_list(frame.f_code) if cache_entries: reasons = get_and_maybe_log_recompilation_reasons( cache_entries[0], frame, innermost_fn(callback), skip_logging=True ) if reasons: failures = textwrap.indent("\n".join(reasons), "- ") guard_failure_details = ( f"triggered by the following guard failure(s):\n{failures}" ) message += f"\n{textwrap.indent(guard_failure_details, ' ')}" precompile_entries = _debug_get_precompile_entries(frame.f_code) if len(precompile_entries) > 0: message += "\nFailed on the following precompiled guards: " for entry in precompile_entries: message += f"\n{entry.guard_manager}{entry.guard_manager.check_verbose(frame.f_locals)}" # type: ignore[attr-defined] raise RuntimeError(message) # to prevent cache miss due to different backend fail_callback._torchdynamo_orig_backend = callback # type: ignore[attr-defined] return fail_callback else: raise RuntimeError(f"invalid torch.compile stance '{_stance}'") def _create_wrapped_callback( compiler_fn: CompilerFn, ) -> convert_frame.CatchErrorsWrapper: hooks = Hooks() return convert_frame.catch_errors_wrapper( convert_frame.convert_frame( # type: ignore[arg-type] compiler_fn, hooks, ), hooks, ) def _get_or_add_example_inputs(frame: DynamoFrameType) -> list[Any]: key = frame.f_code.co_filename + str(frame.f_code.co_firstlineno) example_inputs = get_example_inputs(key) if len(example_inputs) < 2: example_inputs.append(clone_and_convert_to_meta(frame.f_locals)) return example_inputs def _create_delayed_compile_callback( callback: DynamoCallback, stance: str ) -> Callable[..., Any]: def callback_fn(*args: Any, **kwargs: Any) -> convert_frame.ConvertFrameReturn: frame = args[0] example_inputs = 
_get_or_add_example_inputs(frame) if len(example_inputs) == 1: if stance == "eager_then_compile": return ConvertFrameReturn( frame_exec_strategy=FrameExecStrategy( FrameAction.DEFAULT, FrameAction.DEFAULT ) ) elif stance == "aot_eager_then_compile": aot_eager_fn = get_compiler_fn("aot_eager") return _create_wrapped_callback(aot_eager_fn)(*args, **kwargs) dynamism = track_dynamism_across_examples(example_inputs) code_context.get_context(frame.f_code)["dynamism"] = dynamism compiler_fn = callback._torchdynamo_orig_backend._torchdynamo_orig_backend # type: ignore[union-attr] return _create_wrapped_callback(compiler_fn)(*args, **kwargs) # to prevent cache miss due to different backend callback_fn._torchdynamo_orig_backend = callback # type: ignore[attr-defined] return callback_fn def _is_skip_guard_eval_unsafe_stance() -> bool: return _stance.skip_guard_eval_unsafe def _reset_guarded_backend_cache() -> None: global cached_backends for backend in cached_backends.values(): if hasattr(backend, "reset"): backend.reset() cached_backends.clear() DONT_WRAP_FILES = { # For tracing into fx modules inspect.getsourcefile(GraphModule), join(dirname(dirname(__file__)), "onnx/_internal/fx/dynamo_graph_extractor.py"), } def _debug_get_cache_entry_list( code: Union[types.CodeType, Callable[..., Any]], ) -> list[CacheEntry]: """ Given a code object or a callable object, retrieve the cache entries stored in this code. """ if callable(code): code = code.__code__ return torch._C._dynamo.eval_frame._debug_get_cache_entry_list(code)
DynamoStance
python
doocs__leetcode
solution/2400-2499/2418.Sort the People/Solution2.py
{ "start": 0, "end": 172 }
class ____: def sortPeople(self, names: List[str], heights: List[int]) -> List[str]: return [name for _, name in sorted(zip(heights, names), reverse=True)]
Solution
python
dagster-io__dagster
conftest.py
{ "start": 491, "end": 5344 }
class ____: scope: str name: str @lru_cache def buildkite_quarantined_tests(annotation) -> set[TestId]: quarantined_tests = set() if os.getenv("BUILDKITE") or os.getenv("LOCAL_BUILDKITE_QUARANTINE"): # Run our full test suite - warts and all - on the release branch if os.getenv("BUILDKITE_BRANCH", "").startswith("release-"): return quarantined_tests try: import requests token = os.getenv("BUILDKITE_TEST_QUARANTINE_TOKEN") org_slug = os.getenv("BUILDKITE_ORGANIZATION_SLUG") suite_slug = os.getenv("BUILDKITE_TEST_SUITE_SLUG") headers = {"Authorization": f"Bearer {token}"} url = f"https://api.buildkite.com/v2/analytics/organizations/{org_slug}/suites/{suite_slug}/tests/{annotation}" start_time = time.time() timeout = 10 while url and time.time() - start_time < timeout: response = requests.get(url, headers=headers) response.raise_for_status() for test in response.json(): scope = test.get("scope", "") name = test.get("name", "") quarantined_test = TestId(scope, name) quarantined_tests.add(quarantined_test) link_header = response.headers.get("Link", "") next_url = None for part in link_header.split(","): if 'rel="next"' in part: next_url = part[part.find("<") + 1 : part.find(">")] break url = next_url except Exception as e: print(e) # noqa return quarantined_tests def pytest_addoption(parser): parser.addoption( "--split", action="store", default=None, help="Split test selection (e.g., 0/3)" ) def pytest_configure(config): # Create a section break in the logs any time Buildkite invokes pytest # https://buildkite.com/docs/pipelines/managing-log-output # https://docs.pytest.org/en/7.1.x/reference/reference.html?highlight=pytest_configure#pytest.hookspec.pytest_configure if os.getenv("BUILDKITE"): print("+++ Running :pytest: PyTest") # noqa # https://docs.pytest.org/en/7.1.x/example/markers.html#custom-marker-and-command-line-option-to-control-test-runs config.addinivalue_line( "markers", "integration: mark test to skip if DISABLE_INTEGRATION_TESTS is set." 
) def pytest_runtest_setup(item): # https://buildkite.com/docs/apis/rest-api/test-engine/quarantine#list-quarantined-tests # Buildkite Test Engine marks unreliable tests as muted and triages them out to owning teams to improve. # We pull this list of tests at the beginning of each pytest session and add soft xfail markers to each # quarantined test. try: muted = buildkite_quarantined_tests("muted") skipped = buildkite_quarantined_tests("skipped") if muted or skipped: # https://github.com/buildkite/test-collector-python/blob/6fba081a2844d6bdec8607942eee48a03d60cd40/src/buildkite_test_collector/pytest_plugin/buildkite_plugin.py#L22-L27 chunks = item.nodeid.split("::") scope = "::".join(chunks[:-1]) name = chunks[-1] test = TestId(scope, name) if test in muted: item.add_marker(pytest.mark.xfail(reason="Test muted in Buildkite.", strict=False)) if test in skipped: item.add_marker(pytest.skip(reason="Test skipped in Buildkite.")) except Exception as e: print(e) # noqa try: next(item.iter_markers("integration")) if os.getenv("CI_DISABLE_INTEGRATION_TESTS"): pytest.skip("Integration tests are disabled") except StopIteration: pass @pytest.hookimpl(trylast=True) def pytest_collection_modifyitems(config, items): """Split pytest collection. Example usage: pytest --split 1/2 # run half the tests pytest --split 2/2 # run the other half the tests """ split_option = config.getoption("--split") if not split_option: return try: k, n = map(int, split_option.split("/")) except ValueError: raise pytest.UsageError( "--split must be in the form numerator/denominator (e.g. --split=1/3)" ) if k <= 0: raise pytest.UsageError("--split numerator must be > 0") if k > n: raise pytest.UsageError("--split numerator must be smaller than denominator") total = len(items) start = total * (k - 1) // n end = total * k // n selected = items[start:end] deselected = items[:start] + items[end:] if deselected: config.hook.pytest_deselected(items=deselected) items[:] = selected
TestId
python
huggingface__transformers
src/transformers/models/m2m_100/modeling_m2m_100.py
{ "start": 2525, "end": 3015 }
class ____(nn.Embedding): """ This module overrides nn.Embeddings' forward by multiplying with embeddings scale. """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0): super().__init__(num_embeddings, embedding_dim, padding_idx) self.embed_scale = embed_scale def forward(self, input_ids: torch.Tensor): return super().forward(input_ids) * self.embed_scale
M2M100ScaledWordEmbedding
python
apache__airflow
task-sdk/tests/task_sdk/definitions/test_dag.py
{ "start": 24338, "end": 29268 }
class ____: def test_cycle_empty(self): # test empty dag = DAG("dag", schedule=None, start_date=DEFAULT_DATE, default_args={"owner": "owner1"}) assert not dag.check_cycle() def test_cycle_single_task(self): # test single task dag = DAG("dag", schedule=None, start_date=DEFAULT_DATE, default_args={"owner": "owner1"}) with dag: DoNothingOperator(task_id="A") assert not dag.check_cycle() def test_semi_complex(self): dag = DAG("dag", schedule=None, start_date=DEFAULT_DATE, default_args={"owner": "owner1"}) # A -> B -> C # B -> D # E -> F with dag: create_cluster = DoNothingOperator(task_id="c") pod_task = DoNothingOperator(task_id="p") pod_task_xcom = DoNothingOperator(task_id="x") delete_cluster = DoNothingOperator(task_id="d") pod_task_xcom_result = DoNothingOperator(task_id="r") create_cluster >> pod_task >> delete_cluster create_cluster >> pod_task_xcom >> delete_cluster pod_task_xcom >> pod_task_xcom_result def test_cycle_no_cycle(self): # test no cycle dag = DAG("dag", schedule=None, start_date=DEFAULT_DATE, default_args={"owner": "owner1"}) # A -> B -> C # B -> D # E -> F with dag: op1 = DoNothingOperator(task_id="A") op2 = DoNothingOperator(task_id="B") op3 = DoNothingOperator(task_id="C") op4 = DoNothingOperator(task_id="D") op5 = DoNothingOperator(task_id="E") op6 = DoNothingOperator(task_id="F") op1.set_downstream(op2) op2.set_downstream(op3) op2.set_downstream(op4) op5.set_downstream(op6) assert not dag.check_cycle() def test_cycle_loop(self): # test self loop dag = DAG("dag", schedule=None, start_date=DEFAULT_DATE, default_args={"owner": "owner1"}) # A -> A with dag: op1 = DoNothingOperator(task_id="A") op1.set_downstream(op1) with pytest.raises(AirflowDagCycleException): assert not dag.check_cycle() def test_cycle_downstream_loop(self): # test downstream self loop dag = DAG("dag", schedule=None, start_date=DEFAULT_DATE, default_args={"owner": "owner1"}) # A -> B -> C -> D -> E -> E with dag: op1 = DoNothingOperator(task_id="A") op2 = 
DoNothingOperator(task_id="B") op3 = DoNothingOperator(task_id="C") op4 = DoNothingOperator(task_id="D") op5 = DoNothingOperator(task_id="E") op1.set_downstream(op2) op2.set_downstream(op3) op3.set_downstream(op4) op4.set_downstream(op5) op5.set_downstream(op5) with pytest.raises(AirflowDagCycleException): assert not dag.check_cycle() def test_cycle_large_loop(self): # large loop dag = DAG("dag", schedule=None, start_date=DEFAULT_DATE, default_args={"owner": "owner1"}) # A -> B -> C -> D -> E -> A with dag: start = DoNothingOperator(task_id="start") current = start for i in range(10000): next_task = DoNothingOperator(task_id=f"task_{i}") current.set_downstream(next_task) current = next_task current.set_downstream(start) with pytest.raises(AirflowDagCycleException): assert not dag.check_cycle() def test_cycle_arbitrary_loop(self): # test arbitrary loop dag = DAG("dag", schedule=None, start_date=DEFAULT_DATE, default_args={"owner": "owner1"}) # E-> A -> B -> F -> A # -> C -> F with dag: op1 = DoNothingOperator(task_id="A") op2 = DoNothingOperator(task_id="B") op3 = DoNothingOperator(task_id="C") op4 = DoNothingOperator(task_id="E") op5 = DoNothingOperator(task_id="F") op1.set_downstream(op2) op1.set_downstream(op3) op4.set_downstream(op1) op3.set_downstream(op5) op2.set_downstream(op5) op5.set_downstream(op1) with pytest.raises(AirflowDagCycleException): assert not dag.check_cycle() def test_cycle_task_group_with_edge_labels(self): # Test a cycle is not detected when Labels are used between tasks in Task Groups. dag = DAG("dag", schedule=None, start_date=DEFAULT_DATE, default_args={"owner": "owner1"}) with dag: with TaskGroup(group_id="group"): op1 = DoNothingOperator(task_id="A") op2 = DoNothingOperator(task_id="B") op1 >> Label("label") >> op2 assert not dag.check_cycle()
TestCycleTester
python
MongoEngine__mongoengine
tests/queryset/test_queryset.py
{ "start": 1069, "end": 198039 }
class ____(unittest.TestCase): def setUp(self): connect(db="mongoenginetest") connect(db="mongoenginetest2", alias="test2") class PersonMeta(EmbeddedDocument): weight = IntField() class Person(Document): name = StringField() age = IntField() person_meta = EmbeddedDocumentField(PersonMeta) meta = {"allow_inheritance": True} Person.drop_collection() self.PersonMeta = PersonMeta self.Person = Person self.mongodb_version = get_mongodb_version() def test_initialisation(self): """Ensure that a QuerySet is correctly initialised by QuerySetManager.""" assert isinstance(self.Person.objects, QuerySet) assert ( self.Person.objects._collection.name == self.Person._get_collection_name() ) assert isinstance( self.Person.objects._collection, pymongo.collection.Collection ) def test_cannot_perform_joins_references(self): class BlogPost(Document): author = ReferenceField(self.Person) author2 = GenericReferenceField() # test addressing a field from a reference with pytest.raises(InvalidQueryError): list(BlogPost.objects(author__name="test")) # should fail for a generic reference as well with pytest.raises(InvalidQueryError): list(BlogPost.objects(author2__name="test")) def test_find(self): """Ensure that a query returns a valid set of results.""" user_a = self.Person.objects.create(name="User A", age=20) user_b = self.Person.objects.create(name="User B", age=30) # Find all people in the collection people = self.Person.objects assert people.count() == 2 results = list(people) assert isinstance(results[0], self.Person) assert isinstance(results[0].id, ObjectId) assert results[0] == user_a assert results[0].name == "User A" assert results[0].age == 20 assert results[1] == user_b assert results[1].name == "User B" assert results[1].age == 30 # Filter people by age people = self.Person.objects(age=20) assert people.count() == 1 person = next(people) assert person == user_a assert person.name == "User A" assert person.age == 20 def test_slicing_sets_empty_limit_skip(self): 
self.Person.objects.insert( [self.Person(name=f"User {i}", age=i) for i in range(5)], load_bulk=False, ) self.Person.objects.create(name="User B", age=30) self.Person.objects.create(name="User C", age=40) qs = self.Person.objects()[1:2] assert (qs._empty, qs._skip, qs._limit) == (False, 1, 1) assert len(list(qs)) == 1 # Test edge case of [1:1] which should return nothing # and require a hack so that it doesn't clash with limit(0) qs = self.Person.objects()[1:1] assert (qs._empty, qs._skip, qs._limit) == (True, 1, 0) assert len(list(qs)) == 0 qs2 = qs[1:5] # Make sure that further slicing resets _empty assert (qs2._empty, qs2._skip, qs2._limit) == (False, 1, 4) assert len(list(qs2)) == 4 def test_limit_0_returns_all_documents(self): self.Person.objects.create(name="User A", age=20) self.Person.objects.create(name="User B", age=30) n_docs = self.Person.objects().count() persons = list(self.Person.objects().limit(0)) assert len(persons) == 2 == n_docs def test_limit_0(self): """Ensure that QuerySet.limit works as expected.""" self.Person.objects.create(name="User A", age=20) # Test limit with 0 as parameter qs = self.Person.objects.limit(0) assert qs.count() == 0 def test_limit(self): """Ensure that QuerySet.limit works as expected.""" user_a = self.Person.objects.create(name="User A", age=20) _ = self.Person.objects.create(name="User B", age=30) # Test limit on a new queryset people = list(self.Person.objects.limit(1)) assert len(people) == 1 assert people[0] == user_a # Test limit on an existing queryset people = self.Person.objects assert len(people) == 2 people2 = people.limit(1) assert len(people) == 2 assert len(people2) == 1 assert people2[0] == user_a # Test limit with 0 as parameter people = self.Person.objects.limit(0) assert people.count(with_limit_and_skip=True) == 2 assert len(people) == 2 # Test chaining of only after limit person = self.Person.objects().limit(1).only("name").first() assert person == user_a assert person.name == "User A" assert 
person.age is None def test_skip(self): """Ensure that QuerySet.skip works as expected.""" user_a = self.Person.objects.create(name="User A", age=20) user_b = self.Person.objects.create(name="User B", age=30) # Test skip on a new queryset people = list(self.Person.objects.skip(0)) assert len(people) == 2 assert people[0] == user_a assert people[1] == user_b people = list(self.Person.objects.skip(1)) assert len(people) == 1 assert people[0] == user_b # Test skip on an existing queryset people = self.Person.objects assert len(people) == 2 people2 = people.skip(1) assert len(people) == 2 assert len(people2) == 1 assert people2[0] == user_b # Test chaining of only after skip person = self.Person.objects().skip(1).only("name").first() assert person == user_b assert person.name == "User B" assert person.age is None def test___getitem___invalid_index(self): """Ensure slicing a queryset works as expected.""" with pytest.raises(TypeError): self.Person.objects()["a"] def test_slice(self): """Ensure slicing a queryset works as expected.""" user_a = self.Person.objects.create(name="User A", age=20) user_b = self.Person.objects.create(name="User B", age=30) user_c = self.Person.objects.create(name="User C", age=40) # Test slice limit people = list(self.Person.objects[:2]) assert len(people) == 2 assert people[0] == user_a assert people[1] == user_b # Test slice skip people = list(self.Person.objects[1:]) assert len(people) == 2 assert people[0] == user_b assert people[1] == user_c # Test slice limit and skip people = list(self.Person.objects[1:2]) assert len(people) == 1 assert people[0] == user_b # Test slice limit and skip on an existing queryset people = self.Person.objects assert len(people) == 3 people2 = people[1:2] assert len(people2) == 1 assert people2[0] == user_b # Test slice limit and skip cursor reset qs = self.Person.objects[1:2] # fetch then delete the cursor qs._cursor qs._cursor_obj = None people = list(qs) assert len(people) == 1 assert people[0].name == "User 
B" # Test empty slice people = list(self.Person.objects[1:1]) assert len(people) == 0 # Test slice out of range people = list(self.Person.objects[80000:80001]) assert len(people) == 0 # Test larger slice __repr__ self.Person.objects.delete() for i in range(55): self.Person(name="A%s" % i, age=i).save() assert self.Person.objects.count() == 55 assert "Person object" == "%s" % self.Person.objects[0] assert ( "[<Person: Person object>, <Person: Person object>]" == "%s" % self.Person.objects[1:3] ) assert ( "[<Person: Person object>, <Person: Person object>]" == "%s" % self.Person.objects[51:53] ) def test_find_one(self): """Ensure that a query using find_one returns a valid result.""" person1 = self.Person(name="User A", age=20) person1.save() person2 = self.Person(name="User B", age=30) person2.save() # Retrieve the first person from the database person = self.Person.objects.first() assert isinstance(person, self.Person) assert person.name == "User A" assert person.age == 20 # Use a query to filter the people found to just person2 person = self.Person.objects(age=30).first() assert person.name == "User B" person = self.Person.objects(age__lt=30).first() assert person.name == "User A" # Use array syntax person = self.Person.objects[0] assert person.name == "User A" person = self.Person.objects[1] assert person.name == "User B" with pytest.raises(IndexError): self.Person.objects[2] # Find a document using just the object id person = self.Person.objects.with_id(person1.id) assert person.name == "User A" with pytest.raises(InvalidQueryError): self.Person.objects(name="User A").with_id(person1.id) def test_get_no_document_exists_raises_doesnotexist(self): assert self.Person.objects.count() == 0 # Try retrieving when no objects exists with pytest.raises(DoesNotExist): self.Person.objects.get() with pytest.raises(self.Person.DoesNotExist): self.Person.objects.get() def test_get_multiple_match_raises_multipleobjectsreturned(self): """Ensure that a query using ``get`` returns 
at most one result.""" assert self.Person.objects().count() == 0 person1 = self.Person(name="User A", age=20) person1.save() p = self.Person.objects.get() assert p == person1 person2 = self.Person(name="User B", age=20) person2.save() person3 = self.Person(name="User C", age=30) person3.save() # .get called without argument with pytest.raises(MultipleObjectsReturned): self.Person.objects.get() with pytest.raises(self.Person.MultipleObjectsReturned): self.Person.objects.get() # check filtering with pytest.raises(MultipleObjectsReturned): self.Person.objects.get(age__lt=30) with pytest.raises(MultipleObjectsReturned) as exc_info: self.Person.objects(age__lt=30).get() assert "2 or more items returned, instead of 1" == str(exc_info.value) # Use a query to filter the people found to just person2 person = self.Person.objects.get(age=30) assert person == person3 def test_find_array_position(self): """Ensure that query by array position works.""" class Comment(EmbeddedDocument): name = StringField() class Post(EmbeddedDocument): comments = ListField(EmbeddedDocumentField(Comment)) class Blog(Document): tags = ListField(StringField()) posts = ListField(EmbeddedDocumentField(Post)) Blog.drop_collection() Blog.objects.create(tags=["a", "b"]) assert Blog.objects(tags__0="a").count() == 1 assert Blog.objects(tags__0="b").count() == 0 assert Blog.objects(tags__1="a").count() == 0 assert Blog.objects(tags__1="b").count() == 1 Blog.drop_collection() comment1 = Comment(name="testa") comment2 = Comment(name="testb") post1 = Post(comments=[comment1, comment2]) post2 = Post(comments=[comment2, comment2]) blog1 = Blog.objects.create(posts=[post1, post2]) blog2 = Blog.objects.create(posts=[post2, post1]) blog = Blog.objects(posts__0__comments__0__name="testa").get() assert blog == blog1 blog = Blog.objects(posts__0__comments__0__name="testb").get() assert blog == blog2 query = Blog.objects(posts__1__comments__1__name="testb") assert query.count() == 2 query = 
Blog.objects(posts__1__comments__1__name="testa") assert query.count() == 0 query = Blog.objects(posts__0__comments__1__name="testa") assert query.count() == 0 Blog.drop_collection() def test_none(self): class A(Document): s = StringField() A.drop_collection() A().save() # validate collection not empty assert A.objects.count() == 1 # update operations assert A.objects.none().update(s="1") == 0 assert A.objects.none().update_one(s="1") == 0 assert A.objects.none().modify(s="1") is None # validate noting change by update operations assert A.objects(s="1").count() == 0 # fetch queries assert A.objects.none().first() is None assert list(A.objects.none()) == [] assert list(A.objects.none().all()) == [] assert list(A.objects.none().limit(1)) == [] assert list(A.objects.none().skip(1)) == [] assert list(A.objects.none()[:5]) == [] def test_chaining(self): class A(Document): s = StringField() class B(Document): ref = ReferenceField(A) boolfield = BooleanField(default=False) A.drop_collection() B.drop_collection() a1 = A(s="test1").save() a2 = A(s="test2").save() B(ref=a1, boolfield=True).save() # Works q1 = B.objects.filter(ref__in=[a1, a2], ref=a1)._query # Doesn't work q2 = B.objects.filter(ref__in=[a1, a2]) q2 = q2.filter(ref=a1)._query assert q1 == q2 a_objects = A.objects(s="test1") query = B.objects(ref__in=a_objects) query = query.filter(boolfield=True) assert query.count() == 1 def test_batch_size(self): """Ensure that batch_size works.""" class A(Document): s = StringField() A.drop_collection() A.objects.insert([A(s=str(i)) for i in range(100)], load_bulk=True) # test iterating over the result set cnt = 0 for _ in A.objects.batch_size(10): cnt += 1 assert cnt == 100 # test chaining qs = A.objects.all() qs = qs.limit(10).batch_size(20).skip(91) cnt = 0 for _ in qs: cnt += 1 assert cnt == 9 # test invalid batch size qs = A.objects.batch_size(-1) with pytest.raises(ValueError): list(qs) def test_batch_size_cloned(self): class A(Document): s = StringField() # test 
that batch size gets cloned qs = A.objects.batch_size(5) assert qs._batch_size == 5 qs_clone = qs.clone() assert qs_clone._batch_size == 5 def test_update_write_concern(self): """Test that passing write_concern works""" self.Person.drop_collection() write_concern = {"fsync": True} author = self.Person.objects.create(name="Test User") author.save(write_concern=write_concern) # Ensure no regression of #1958 author = self.Person(name="Test User2") author.save(write_concern=None) # will default to {w: 1} result = self.Person.objects.update(set__name="Ross", write_concern={"w": 1}) assert result == 2 result = self.Person.objects.update(set__name="Ross", write_concern={"w": 0}) assert result is None result = self.Person.objects.update_one( set__name="Test User", write_concern={"w": 1} ) assert result == 1 result = self.Person.objects.update_one( set__name="Test User", write_concern={"w": 0} ) assert result is None def test_update_update_has_a_value(self): """Test to ensure that update is passed a value to update to""" self.Person.drop_collection() author = self.Person.objects.create(name="Test User") with pytest.raises(OperationError): self.Person.objects(pk=author.pk).update({}) with pytest.raises(OperationError): self.Person.objects(pk=author.pk).update_one({}) def test_update_array_position(self): """Ensure that updating by array position works. Check update() and update_one() can take syntax like: set__posts__1__comments__1__name="testc" Check that it only works for ListFields. 
""" class Comment(EmbeddedDocument): name = StringField() class Post(EmbeddedDocument): comments = ListField(EmbeddedDocumentField(Comment)) class Blog(Document): tags = ListField(StringField()) posts = ListField(EmbeddedDocumentField(Post)) Blog.drop_collection() comment1 = Comment(name="testa") comment2 = Comment(name="testb") post1 = Post(comments=[comment1, comment2]) post2 = Post(comments=[comment2, comment2]) Blog.objects.create(posts=[post1, post2]) Blog.objects.create(posts=[post2, post1]) # Update all of the first comments of second posts of all blogs Blog.objects().update(set__posts__1__comments__0__name="testc") testc_blogs = Blog.objects(posts__1__comments__0__name="testc") assert testc_blogs.count() == 2 Blog.drop_collection() Blog.objects.create(posts=[post1, post2]) Blog.objects.create(posts=[post2, post1]) # Update only the first blog returned by the query Blog.objects().update_one(set__posts__1__comments__1__name="testc") testc_blogs = Blog.objects(posts__1__comments__1__name="testc") assert testc_blogs.count() == 1 # Check that using this indexing syntax on a non-list fails with pytest.raises(InvalidQueryError): Blog.objects().update(set__posts__1__comments__0__name__1="asdf") Blog.drop_collection() def test_update_array_filters(self): """Ensure that updating by array_filters works.""" class Comment(EmbeddedDocument): comment_tags = ListField(StringField()) class Blog(Document): tags = ListField(StringField()) comments = EmbeddedDocumentField(Comment) Blog.drop_collection() # update one Blog.objects.create(tags=["test1", "test2", "test3"]) Blog.objects().update_one( __raw__={"$set": {"tags.$[element]": "test11111"}}, array_filters=[{"element": {"$eq": "test2"}}], ) testc_blogs = Blog.objects(tags="test11111") assert testc_blogs.count() == 1 # modify Blog.drop_collection() # update one Blog.objects.create(tags=["test1", "test2", "test3"]) new_blog = Blog.objects().modify( __raw__={"$set": {"tags.$[element]": "test11111"}}, 
array_filters=[{"element": {"$eq": "test2"}}], new=True, ) testc_blogs = Blog.objects(tags="test11111") assert new_blog == testc_blogs.first() assert testc_blogs.count() == 1 Blog.drop_collection() # update one inner list comments = Comment(comment_tags=["test1", "test2", "test3"]) Blog.objects.create(comments=comments) Blog.objects().update_one( __raw__={"$set": {"comments.comment_tags.$[element]": "test11111"}}, array_filters=[{"element": {"$eq": "test2"}}], ) testc_blogs = Blog.objects(comments__comment_tags="test11111") assert testc_blogs.count() == 1 # update many Blog.drop_collection() Blog.objects.create(tags=["test1", "test2", "test3", "test_all"]) Blog.objects.create(tags=["test4", "test5", "test6", "test_all"]) Blog.objects().update( __raw__={"$set": {"tags.$[element]": "test11111"}}, array_filters=[{"element": {"$eq": "test2"}}], ) testc_blogs = Blog.objects(tags="test11111") assert testc_blogs.count() == 1 Blog.objects().update( __raw__={"$set": {"tags.$[element]": "test_all1234577"}}, array_filters=[{"element": {"$eq": "test_all"}}], ) testc_blogs = Blog.objects(tags="test_all1234577") assert testc_blogs.count() == 2 def test_update_using_positional_operator(self): """Ensure that the list fields can be updated using the positional operator.""" class Comment(EmbeddedDocument): by = StringField() votes = IntField() class BlogPost(Document): title = StringField() comments = ListField(EmbeddedDocumentField(Comment)) BlogPost.drop_collection() c1 = Comment(by="joe", votes=3) c2 = Comment(by="jane", votes=7) BlogPost(title="ABC", comments=[c1, c2]).save() BlogPost.objects(comments__by="jane").update(inc__comments__S__votes=1) post = BlogPost.objects.first() assert post.comments[1].by == "jane" assert post.comments[1].votes == 8 def test_update_using_positional_operator_matches_first(self): # Currently the $ operator only applies to the first matched item in # the query class Simple(Document): x = ListField() Simple.drop_collection() Simple(x=[1, 2, 3, 
2]).save() Simple.objects(x=2).update(inc__x__S=1) simple = Simple.objects.first() assert simple.x == [1, 3, 3, 2] Simple.drop_collection() # You can set multiples Simple.drop_collection() Simple(x=[1, 2, 3, 4]).save() Simple(x=[2, 3, 4, 5]).save() Simple(x=[3, 4, 5, 6]).save() Simple(x=[4, 5, 6, 7]).save() Simple.objects(x=3).update(set__x__S=0) s = Simple.objects() assert s[0].x == [1, 2, 0, 4] assert s[1].x == [2, 0, 4, 5] assert s[2].x == [0, 4, 5, 6] assert s[3].x == [4, 5, 6, 7] # Using "$unset" with an expression like this "array.$" will result in # the array item becoming None, not being removed. Simple.drop_collection() Simple(x=[1, 2, 3, 4, 3, 2, 3, 4]).save() Simple.objects(x=3).update(unset__x__S=1) simple = Simple.objects.first() assert simple.x == [1, 2, None, 4, 3, 2, 3, 4] # Nested updates arent supported yet.. with pytest.raises(OperationError): Simple.drop_collection() Simple(x=[{"test": [1, 2, 3, 4]}]).save() Simple.objects(x__test=2).update(set__x__S__test__S=3) assert simple.x == [1, 2, 3, 4] def test_update_using_positional_operator_embedded_document(self): """Ensure that the embedded documents can be updated using the positional operator.""" class Vote(EmbeddedDocument): score = IntField() class Comment(EmbeddedDocument): by = StringField() votes = EmbeddedDocumentField(Vote) class BlogPost(Document): title = StringField() comments = ListField(EmbeddedDocumentField(Comment)) BlogPost.drop_collection() c1 = Comment(by="joe", votes=Vote(score=3)) c2 = Comment(by="jane", votes=Vote(score=7)) BlogPost(title="ABC", comments=[c1, c2]).save() BlogPost.objects(comments__by="joe").update( set__comments__S__votes=Vote(score=4) ) post = BlogPost.objects.first() assert post.comments[0].by == "joe" assert post.comments[0].votes.score == 4 def test_update_min_max(self): class Scores(Document): high_score = IntField() low_score = IntField() scores = Scores.objects.create(high_score=800, low_score=200) Scores.objects(id=scores.id).update(min__low_score=150) 
assert Scores.objects.get(id=scores.id).low_score == 150 Scores.objects(id=scores.id).update(min__low_score=250) assert Scores.objects.get(id=scores.id).low_score == 150 Scores.objects(id=scores.id).update(max__high_score=1000) assert Scores.objects.get(id=scores.id).high_score == 1000 Scores.objects(id=scores.id).update(max__high_score=500) assert Scores.objects.get(id=scores.id).high_score == 1000 def test_update_multiple(self): class Product(Document): item = StringField() price = FloatField() product = Product.objects.create(item="ABC", price=10.99) product = Product.objects.create(item="ABC", price=10.99) Product.objects(id=product.id).update(mul__price=1.25) assert Product.objects.get(id=product.id).price == 13.7375 unknown_product = Product.objects.create(item="Unknown") Product.objects(id=unknown_product.id).update(mul__price=100) assert Product.objects.get(id=unknown_product.id).price == 0 def test_updates_can_have_match_operators(self): class Comment(EmbeddedDocument): content = StringField() name = StringField(max_length=120) vote = IntField() class Post(Document): title = StringField(required=True) tags = ListField(StringField()) comments = ListField(EmbeddedDocumentField("Comment")) Post.drop_collection() comm1 = Comment(content="very funny indeed", name="John S", vote=1) comm2 = Comment(content="kind of funny", name="Mark P", vote=0) Post( title="Fun with MongoEngine", tags=["mongodb", "mongoengine"], comments=[comm1, comm2], ).save() Post.objects().update_one(pull__comments__vote__lt=1) assert 1 == len(Post.objects.first().comments) def test_mapfield_update(self): """Ensure that the MapField can be updated.""" class Member(EmbeddedDocument): gender = StringField() age = IntField() class Club(Document): members = MapField(EmbeddedDocumentField(Member)) Club.drop_collection() club = Club() club.members["John"] = Member(gender="M", age=13) club.save() Club.objects().update(set__members={"John": Member(gender="F", age=14)}) club = Club.objects().first() 
assert club.members["John"].gender == "F" assert club.members["John"].age == 14 def test_dictfield_update(self): """Ensure that the DictField can be updated.""" class Club(Document): members = DictField() club = Club() club.members["John"] = {"gender": "M", "age": 13} club.save() Club.objects().update(set__members={"John": {"gender": "F", "age": 14}}) club = Club.objects().first() assert club.members["John"]["gender"] == "F" assert club.members["John"]["age"] == 14 def test_update_results(self): self.Person.drop_collection() result = self.Person(name="Bob", age=25).update(upsert=True, full_result=True) assert isinstance(result, UpdateResult) assert "upserted" in result.raw_result assert not result.raw_result["updatedExisting"] bob = self.Person.objects.first() result = bob.update(set__age=30, full_result=True) assert isinstance(result, UpdateResult) assert result.raw_result["updatedExisting"] self.Person(name="Bob", age=20).save() result = self.Person.objects(name="Bob").update(set__name="bobby", multi=True) assert result == 2 def test_update_validate(self): class EmDoc(EmbeddedDocument): str_f = StringField() class Doc(Document): str_f = StringField() dt_f = DateTimeField() cdt_f = ComplexDateTimeField() ed_f = EmbeddedDocumentField(EmDoc) with pytest.raises(ValidationError): Doc.objects().update(str_f=1, upsert=True) with pytest.raises(ValidationError): Doc.objects().update(dt_f="datetime", upsert=True) with pytest.raises(ValidationError): Doc.objects().update(ed_f__str_f=1, upsert=True) def test_update_related_models(self): class TestPerson(Document): name = StringField() class TestOrganization(Document): name = StringField() owner = ReferenceField(TestPerson) TestPerson.drop_collection() TestOrganization.drop_collection() p = TestPerson(name="p1") p.save() o = TestOrganization(name="o1") o.save() o.owner = p p.name = "p2" assert o._get_changed_fields() == ["owner"] assert p._get_changed_fields() == ["name"] o.save() assert o._get_changed_fields() == [] assert 
p._get_changed_fields() == ["name"] # Fails; it's empty # This will do NOTHING at all, even though we changed the name p.save() p.reload() assert p.name == "p2" # Fails; it's still `p1` def test_upsert(self): self.Person.drop_collection() self.Person.objects(pk=ObjectId(), name="Bob", age=30).update(upsert=True) bob = self.Person.objects.first() assert "Bob" == bob.name assert 30 == bob.age def test_upsert_one(self): self.Person.drop_collection() bob = self.Person.objects(name="Bob", age=30).upsert_one() assert "Bob" == bob.name assert 30 == bob.age bob.name = "Bobby" bob.save() bobby = self.Person.objects(name="Bobby", age=30).upsert_one() assert "Bobby" == bobby.name assert 30 == bobby.age assert bob.id == bobby.id def test_set_on_insert(self): self.Person.drop_collection() self.Person.objects(pk=ObjectId()).update( set__name="Bob", set_on_insert__age=30, upsert=True ) bob = self.Person.objects.first() assert "Bob" == bob.name assert 30 == bob.age def test_rename(self): self.Person.drop_collection() self.Person.objects.create(name="Foo", age=11) bob = self.Person.objects.as_pymongo().first() assert "age" in bob assert bob["age"] == 11 self.Person.objects(name="Foo").update(rename__age="person_age") bob = self.Person.objects.as_pymongo().first() assert "age" not in bob assert "person_age" in bob assert bob["person_age"] == 11 def test_save_and_only_on_fields_with_default(self): class Embed(EmbeddedDocument): field = IntField() class B(Document): meta = {"collection": "b"} field = IntField(default=1) embed = EmbeddedDocumentField(Embed, default=Embed) embed_no_default = EmbeddedDocumentField(Embed) # Creating {field : 2, embed : {field: 2}, embed_no_default: {field: 2}} val = 2 embed = Embed() embed.field = val record = B() record.field = val record.embed = embed record.embed_no_default = embed record.save() # Checking it was saved correctly record.reload() assert record.field == 2 assert record.embed_no_default.field == 2 assert record.embed.field == 2 # Request 
only the _id field and save clone = B.objects().only("id").first() clone.save() # Reload the record and see that the embed data is not lost record.reload() assert record.field == 2 assert record.embed_no_default.field == 2 assert record.embed.field == 2 def test_bulk_insert(self): """Ensure that bulk insert works""" class Comment(EmbeddedDocument): name = StringField() class Post(EmbeddedDocument): comments = ListField(EmbeddedDocumentField(Comment)) class Blog(Document): title = StringField(unique=True) tags = ListField(StringField()) posts = ListField(EmbeddedDocumentField(Post)) Blog.drop_collection() # Recreates the collection assert 0 == Blog.objects.count() comment1 = Comment(name="testa") comment2 = Comment(name="testb") post1 = Post(comments=[comment1, comment2]) post2 = Post(comments=[comment2, comment2]) # Check bulk insert using load_bulk=False blogs = [Blog(title="%s" % i, posts=[post1, post2]) for i in range(99)] with query_counter() as q: assert q == 0 Blog.objects.insert(blogs, load_bulk=False) assert q == 1 # 1 entry containing the list of inserts assert Blog.objects.count() == len(blogs) Blog.drop_collection() Blog.ensure_indexes() # Check bulk insert using load_bulk=True blogs = [Blog(title="%s" % i, posts=[post1, post2]) for i in range(99)] with query_counter() as q: assert q == 0 Blog.objects.insert(blogs) assert q == 2 # 1 for insert 1 for fetch Blog.drop_collection() comment1 = Comment(name="testa") comment2 = Comment(name="testb") post1 = Post(comments=[comment1, comment2]) post2 = Post(comments=[comment2, comment2]) blog1 = Blog(title="code", posts=[post1, post2]) blog2 = Blog(title="mongodb", posts=[post2, post1]) blog1, blog2 = Blog.objects.insert([blog1, blog2]) assert blog1.title == "code" assert blog2.title == "mongodb" assert Blog.objects.count() == 2 # test inserting an existing document (shouldn't be allowed) with pytest.raises(OperationError) as exc_info: blog = Blog.objects.first() Blog.objects.insert(blog) assert ( 
str(exc_info.value) == "Some documents have ObjectIds, use doc.update() instead" ) # test inserting a query set with pytest.raises(OperationError) as exc_info: blogs_qs = Blog.objects Blog.objects.insert(blogs_qs) assert ( str(exc_info.value) == "Some documents have ObjectIds, use doc.update() instead" ) # insert 1 new doc new_post = Blog(title="code123", id=ObjectId()) Blog.objects.insert(new_post) Blog.drop_collection() blog1 = Blog(title="code", posts=[post1, post2]) blog1 = Blog.objects.insert(blog1) assert blog1.title == "code" assert Blog.objects.count() == 1 Blog.drop_collection() blog1 = Blog(title="code", posts=[post1, post2]) obj_id = Blog.objects.insert(blog1, load_bulk=False) assert isinstance(obj_id, ObjectId) Blog.drop_collection() post3 = Post(comments=[comment1, comment1]) blog1 = Blog(title="foo", posts=[post1, post2]) blog2 = Blog(title="bar", posts=[post2, post3]) Blog.objects.insert([blog1, blog2]) with pytest.raises(NotUniqueError): Blog.objects.insert(Blog(title=blog2.title)) assert Blog.objects.count() == 2 def test_bulk_insert_different_class_fails(self): class Blog(Document): pass class Author(Document): pass # try inserting a different document class with pytest.raises(OperationError): Blog.objects.insert(Author()) def test_bulk_insert_with_wrong_type(self): class Blog(Document): name = StringField() Blog.drop_collection() Blog(name="test").save() with pytest.raises(OperationError): Blog.objects.insert("HELLO WORLD") with pytest.raises(OperationError): Blog.objects.insert({"name": "garbage"}) def test_bulk_insert_update_input_document_ids(self): class Comment(Document): idx = IntField() Comment.drop_collection() # Test with bulk comments = [Comment(idx=idx) for idx in range(20)] for com in comments: assert com.id is None returned_comments = Comment.objects.insert(comments, load_bulk=True) for com in comments: assert isinstance(com.id, ObjectId) input_mapping = {com.id: com.idx for com in comments} saved_mapping = {com.id: com.idx for com 
in returned_comments}
        assert input_mapping == saved_mapping

        Comment.drop_collection()

        # Test with just one
        comment = Comment(idx=0)
        inserted_comment_id = Comment.objects.insert(comment, load_bulk=False)
        assert comment.id == inserted_comment_id

    def test_bulk_insert_accepts_doc_with_ids(self):
        """insert() must accept documents that already carry a primary key."""

        class Comment(Document):
            id = IntField(primary_key=True)

        Comment.drop_collection()

        com1 = Comment(id=0)
        com2 = Comment(id=1)
        Comment.objects.insert([com1, com2])

    def test_insert_raise_if_duplicate_in_constraint(self):
        """Inserting the same primary key twice must raise NotUniqueError."""

        class Comment(Document):
            id = IntField(primary_key=True)

        Comment.drop_collection()

        com1 = Comment(id=0)
        Comment.objects.insert(com1)

        with pytest.raises(NotUniqueError):
            Comment.objects.insert(com1)

    def test_get_changed_fields_query_count(self):
        """Make sure we don't perform unnecessary db operations when
        none of document's fields were updated.
        """

        class Person(Document):
            name = StringField()
            owns = ListField(ReferenceField("Organization"))
            projects = ListField(ReferenceField("Project"))

        class Organization(Document):
            name = StringField()
            owner = ReferenceField(Person)
            employees = ListField(ReferenceField(Person))

        class Project(Document):
            name = StringField()

        Person.drop_collection()
        Organization.drop_collection()
        Project.drop_collection()

        r1 = Project(name="r1").save()
        r2 = Project(name="r2").save()
        r3 = Project(name="r3").save()
        p1 = Person(name="p1", projects=[r1, r2]).save()
        p2 = Person(name="p2", projects=[r2, r3]).save()
        o1 = Organization(name="o1", employees=[p1]).save()

        with query_counter() as q:
            assert q == 0

            # Fetching a document should result in a query.
            org = Organization.objects.get(id=o1.id)
            assert q == 1

            # Checking changed fields of a newly fetched document should not
            # result in a query.
            org._get_changed_fields()
            assert q == 1

        # Saving a doc without changing any of its fields should not result
        # in a query (with or without cascade=False).
        org = Organization.objects.get(id=o1.id)
        with query_counter() as q:
            org.save()
            assert q == 0

        org = Organization.objects.get(id=o1.id)
        with query_counter() as q:
            org.save(cascade=False)
            assert q == 0

        # Saving a doc after you append a reference to it should result in
        # two db operations (a query for the reference and an update).
        # TODO dereferencing of p2 shouldn't be necessary.
        org = Organization.objects.get(id=o1.id)
        with query_counter() as q:
            org.employees.append(p2)  # dereferences p2
            org.save()  # saves the org
            assert q == 2

    def test_repeated_iteration(self):
        """Ensure that QuerySet rewinds itself once iteration finishes."""
        self.Person(name="Person 1").save()
        self.Person(name="Person 2").save()

        queryset = self.Person.objects
        people1 = [person for person in queryset]
        people2 = [person for person in queryset]

        # Check that it still works even if iteration is interrupted.
        for _person in queryset:
            break
        people3 = [person for person in queryset]

        assert people1 == people2
        assert people1 == people3

    def test_repr(self):
        """Test repr behavior isn't destructive."""

        class Doc(Document):
            number = IntField()

            def __repr__(self):
                return "<Doc: %s>" % self.number

        Doc.drop_collection()
        Doc.objects.insert([Doc(number=i) for i in range(1000)], load_bulk=True)

        docs = Doc.objects.order_by("number")

        assert docs.count() == 1000

        docs_string = "%s" % docs
        assert "Doc: 0" in docs_string

        # repr on the queryset must not consume it: count stays intact
        assert docs.count() == 1000
        assert "(remaining elements truncated)" in "%s" % docs

        # Limit and skip
        docs = docs[1:4]
        assert "[<Doc: 1>, <Doc: 2>, <Doc: 3>]" == "%s" % docs

        assert docs.count(with_limit_and_skip=True) == 3
        for _ in docs:
            assert ".. queryset mid-iteration .." == repr(docs)

    def test_regex_query_shortcuts(self):
        """Ensure that contains, startswith, endswith, etc work."""
        person = self.Person(name="Guido van Rossum")
        person.save()

        # Test contains
        obj = self.Person.objects(name__contains="van").first()
        assert obj == person
        obj = self.Person.objects(name__contains="Van").first()
        assert obj is None

        # Test icontains
        obj = self.Person.objects(name__icontains="Van").first()
        assert obj == person

        # Test startswith
        obj = self.Person.objects(name__startswith="Guido").first()
        assert obj == person
        obj = self.Person.objects(name__startswith="guido").first()
        assert obj is None

        # Test istartswith
        obj = self.Person.objects(name__istartswith="guido").first()
        assert obj == person

        # Test endswith
        obj = self.Person.objects(name__endswith="Rossum").first()
        assert obj == person
        obj = self.Person.objects(name__endswith="rossuM").first()
        assert obj is None

        # Test iendswith
        obj = self.Person.objects(name__iendswith="rossuM").first()
        assert obj == person

        # Test exact
        obj = self.Person.objects(name__exact="Guido van Rossum").first()
        assert obj == person
        obj = self.Person.objects(name__exact="Guido van rossum").first()
        assert obj is None
        obj = self.Person.objects(name__exact="Guido van Rossu").first()
        assert obj is None

        # Test iexact
        obj = self.Person.objects(name__iexact="gUIDO VAN rOSSUM").first()
        assert obj == person
        obj = self.Person.objects(name__iexact="gUIDO VAN rOSSU").first()
        assert obj is None

        # Test wholeword
        obj = self.Person.objects(name__wholeword="Guido").first()
        assert obj == person
        obj = self.Person.objects(name__wholeword="rossum").first()
        assert obj is None
        obj = self.Person.objects(name__wholeword="Rossu").first()
        assert obj is None

        # Test iwholeword
        obj = self.Person.objects(name__iwholeword="rOSSUM").first()
        assert obj == person
        obj = self.Person.objects(name__iwholeword="rOSSU").first()
        assert obj is None

        # Test regex
        # NOTE(review): "[Guido]" is a character class, not the word "Guido" --
        # the patterns still match/mismatch as asserted, but the intent looks
        # like "^Guido.*Rossum$"; confirm before tightening.
        obj = self.Person.objects(name__regex="^[Guido].*[Rossum]$").first()
        assert obj == person
        obj = self.Person.objects(name__regex="^[guido].*[rossum]$").first()
        assert obj is None
        obj = self.Person.objects(name__regex="^[uido].*[Rossum]$").first()
        assert obj is None

        # Test iregex
        obj = self.Person.objects(name__iregex="^[guido].*[rossum]$").first()
        assert obj == person
        obj = self.Person.objects(name__iregex="^[Uido].*[Rossum]$").first()
        assert obj is None

        # Test unsafe expressions
        person = self.Person(name="Guido van Rossum [.'Geek']")
        person.save()

        obj = self.Person.objects(name__icontains="[.'Geek").first()
        assert obj == person

    def test_not(self):
        """Ensure that the __not operator works as expected."""
        alice = self.Person(name="Alice", age=25)
        alice.save()

        obj = self.Person.objects(name__iexact="alice").first()
        assert obj == alice

        obj = self.Person.objects(name__not__iexact="alice").first()
        assert obj is None

    def test_filter_chaining(self):
        """Ensure filters can be chained together."""

        class Blog(Document):
            id = StringField(primary_key=True)

        class BlogPost(Document):
            blog = ReferenceField(Blog)
            title = StringField()
            is_published = BooleanField()
            published_date = DateTimeField()

            @queryset_manager
            def published(doc_cls, queryset):
                return queryset(is_published=True)

        Blog.drop_collection()
        BlogPost.drop_collection()

        blog_1 = Blog(id="1")
        blog_2 = Blog(id="2")
        blog_3 = Blog(id="3")

        blog_1.save()
        blog_2.save()
        blog_3.save()

        BlogPost.objects.create(
            blog=blog_1,
            title="Blog Post #1",
            is_published=True,
            published_date=datetime.datetime(2010, 1, 5, 0, 0, 0),
        )
        BlogPost.objects.create(
            blog=blog_2,
            title="Blog Post #2",
            is_published=True,
            published_date=datetime.datetime(2010, 1, 6, 0, 0, 0),
        )
        BlogPost.objects.create(
            blog=blog_3,
            title="Blog Post #3",
            is_published=True,
            published_date=datetime.datetime(2010, 1, 7, 0, 0, 0),
        )

        # find all published blog posts before 2010-01-07
        published_posts = BlogPost.published()
        published_posts = published_posts.filter(
            published_date__lt=datetime.datetime(2010, 1, 7, 0, 0, 0)
        )
        assert published_posts.count() == 2

        # chained filters AND together, so blog__in then blog=blog_3 is empty
        blog_posts = BlogPost.objects
        blog_posts = blog_posts.filter(blog__in=[blog_1, blog_2])
        blog_posts = blog_posts.filter(blog=blog_3)
        assert blog_posts.count() == 0

        BlogPost.drop_collection()
        Blog.drop_collection()

    def test_filter_chaining_with_regex(self):
        """Chained string-operator filters must all apply to the same field."""
        person = self.Person(name="Guido van Rossum")
        person.save()

        people = self.Person.objects
        people = (
            people.filter(name__startswith="Gui")
            .filter(name__not__endswith="tum")
            .filter(name__icontains="VAN")
            .filter(name__regex="^Guido")
            .filter(name__wholeword="Guido")
            .filter(name__wholeword="van")
        )
        assert people.count() == 1

    def assertSequence(self, qs, expected):
        # Helper: compare a queryset against an expected sequence element-wise.
        qs = list(qs)
        expected = list(expected)
        assert len(qs) == len(expected)
        for i in range(len(qs)):
            assert qs[i] == expected[i]

    def test_ordering(self):
        """Ensure default ordering is applied and can be overridden."""

        class BlogPost(Document):
            title = StringField()
            published_date = DateTimeField()

            meta = {"ordering": ["-published_date"]}

        BlogPost.drop_collection()

        blog_post_1 = BlogPost.objects.create(
            title="Blog Post #1", published_date=datetime.datetime(2010, 1, 5, 0, 0, 0)
        )
        blog_post_2 = BlogPost.objects.create(
            title="Blog Post #2", published_date=datetime.datetime(2010, 1, 6, 0, 0, 0)
        )
        blog_post_3 = BlogPost.objects.create(
            title="Blog Post #3", published_date=datetime.datetime(2010, 1, 7, 0, 0, 0)
        )

        # get the "first" BlogPost using default ordering
        # from BlogPost.meta.ordering
        expected = [blog_post_3, blog_post_2, blog_post_1]
        self.assertSequence(BlogPost.objects.all(), expected)

        # override default ordering, order BlogPosts by "published_date"
        qs = BlogPost.objects.order_by("+published_date")
        expected = [blog_post_1, blog_post_2, blog_post_3]
        self.assertSequence(qs, expected)

    def test_clear_ordering(self):
        """Ensure that the default ordering can be cleared by calling
        order_by() w/o any arguments.
        """
        ORDER_BY_KEY, CMD_QUERY_KEY = get_key_compat(self.mongodb_version)

        class BlogPost(Document):
            title = StringField()
            published_date = DateTimeField()

            meta = {"ordering": ["-published_date"]}

        BlogPost.drop_collection()

        # default ordering should be used by default
        with db_ops_tracker() as q:
            BlogPost.objects.filter(title="whatever").first()
            assert len(q.get_ops()) == 1
            assert q.get_ops()[0][CMD_QUERY_KEY][ORDER_BY_KEY] == {"published_date": -1}

        # calling order_by() should clear the default ordering
        with db_ops_tracker() as q:
            BlogPost.objects.filter(title="whatever").order_by().first()
            assert len(q.get_ops()) == 1
            assert ORDER_BY_KEY not in q.get_ops()[0][CMD_QUERY_KEY]

        # calling an explicit order_by should use a specified sort
        with db_ops_tracker() as q:
            BlogPost.objects.filter(title="whatever").order_by("published_date").first()
            assert len(q.get_ops()) == 1
            assert q.get_ops()[0][CMD_QUERY_KEY][ORDER_BY_KEY] == {"published_date": 1}

        # calling order_by() after an explicit sort should clear it
        with db_ops_tracker() as q:
            qs = BlogPost.objects.filter(title="whatever").order_by("published_date")
            qs.order_by().first()
            assert len(q.get_ops()) == 1
            assert ORDER_BY_KEY not in q.get_ops()[0][CMD_QUERY_KEY]

    def test_no_ordering_for_get(self):
        """Ensure that Doc.objects.get doesn't use any ordering."""
        ORDER_BY_KEY, CMD_QUERY_KEY = get_key_compat(self.mongodb_version)

        class BlogPost(Document):
            title = StringField()
            published_date = DateTimeField()

            meta = {"ordering": ["-published_date"]}

        BlogPost.objects.create(
            title="whatever", published_date=datetime.datetime.utcnow()
        )

        with db_ops_tracker() as q:
            BlogPost.objects.get(title="whatever")
            assert len(q.get_ops()) == 1
            assert ORDER_BY_KEY not in q.get_ops()[0][CMD_QUERY_KEY]

        # Ordering should be ignored for .get even if we set it explicitly
        with db_ops_tracker() as q:
            BlogPost.objects.order_by("-title").get(title="whatever")
            assert len(q.get_ops()) == 1
            assert ORDER_BY_KEY not in q.get_ops()[0][CMD_QUERY_KEY]

    def test_find_embedded(self):
        """Ensure that an embedded document is properly returned from
        different manners of querying.
        """

        class User(EmbeddedDocument):
            name = StringField()

        class BlogPost(Document):
            content = StringField()
            author = EmbeddedDocumentField(User)

        BlogPost.drop_collection()

        user = User(name="Test User")
        BlogPost.objects.create(author=user, content="Had a good coffee today...")

        result = BlogPost.objects.first()
        assert isinstance(result.author, User)
        assert result.author.name == "Test User"

        result = BlogPost.objects.get(author__name=user.name)
        assert isinstance(result.author, User)
        assert result.author.name == "Test User"

        result = BlogPost.objects.get(author={"name": user.name})
        assert isinstance(result.author, User)
        assert result.author.name == "Test User"

        # Fails, since the string is not a type that is able to represent the
        # author's document structure (should be dict)
        with pytest.raises(InvalidQueryError):
            BlogPost.objects.get(author=user.name)

    def test_find_empty_embedded(self):
        """Ensure that you can save and find an empty embedded document."""

        class User(EmbeddedDocument):
            name = StringField()

        class BlogPost(Document):
            content = StringField()
            author = EmbeddedDocumentField(User)

        BlogPost.drop_collection()

        BlogPost.objects.create(content="Anonymous post...")

        result = BlogPost.objects.get(author=None)
        assert result.author is None

    def test_find_dict_item(self):
        """Ensure that DictField items may be found."""

        class BlogPost(Document):
            info = DictField()

        BlogPost.drop_collection()

        post = BlogPost(info={"title": "test"})
        post.save()

        post_obj = BlogPost.objects(info__title="test").first()
        assert post_obj.id == post.id

        BlogPost.drop_collection()

    @requires_mongodb_lt_42
    def test_exec_js_query(self):
        """Ensure that queries are properly formed for use in exec_js."""

        class BlogPost(Document):
            hits = IntField()
            published = BooleanField()

        BlogPost.drop_collection()

        post1 = BlogPost(hits=1, published=False)
        post1.save()

        post2 = BlogPost(hits=1, published=True)
        post2.save()

        post3 = BlogPost(hits=1, published=True)
        post3.save()

        js_func = """
            function(hitsField) {
                var count = 0;
                db[collection].find(query).forEach(function(doc) {
                    count += doc[hitsField];
                });
                return count;
            }
        """

        # Ensure that normal queries work
        c = BlogPost.objects(published=True).exec_js(js_func, "hits")
        assert c == 2

        c = BlogPost.objects(published=False).exec_js(js_func, "hits")
        assert c == 1

        BlogPost.drop_collection()

    @requires_mongodb_lt_42
    def test_exec_js_field_sub(self):
        """Ensure that field substitutions occur properly in exec_js functions."""

        class Comment(EmbeddedDocument):
            content = StringField(db_field="body")

        class BlogPost(Document):
            name = StringField(db_field="doc-name")
            comments = ListField(EmbeddedDocumentField(Comment), db_field="cmnts")

        BlogPost.drop_collection()

        comments1 = [Comment(content="cool"), Comment(content="yay")]
        post1 = BlogPost(name="post1", comments=comments1)
        post1.save()

        comments2 = [Comment(content="nice stuff")]
        post2 = BlogPost(name="post2", comments=comments2)
        post2.save()

        code = """
        function getComments() {
            var comments = [];
            db[collection].find(query).forEach(function(doc) {
                var docComments = doc[~comments];
                for (var i = 0; i < docComments.length; i++) {
                    comments.push({
                        'document': doc[~name],
                        'comment': doc[~comments][i][~comments.content]
                    });
                }
            });
            return comments;
        }
        """

        # ~field placeholders must be replaced with the db_field names
        sub_code = BlogPost.objects._sub_js_fields(code)
        code_chunks = ['doc["cmnts"];', 'doc["doc-name"],', 'doc["cmnts"][i]["body"]']
        for chunk in code_chunks:
            assert chunk in sub_code

        results = BlogPost.objects.exec_js(code)
        expected_results = [
            {"comment": "cool", "document": "post1"},
            {"comment": "yay", "document": "post1"},
            {"comment": "nice stuff", "document": "post2"},
        ]
        assert results == expected_results

        # Test template style
        code = "{{~comments.content}}"
        sub_code = BlogPost.objects._sub_js_fields(code)
        assert "cmnts.body" == sub_code

        BlogPost.drop_collection()

    def test_delete(self):
        """Ensure that documents are properly deleted from the database."""
        self.Person(name="User A", age=20).save()
        self.Person(name="User B", age=30).save()
        self.Person(name="User C", age=40).save()

        assert self.Person.objects.count() == 3

        self.Person.objects(age__lt=30).delete()
        assert self.Person.objects.count() == 2

        self.Person.objects.delete()
        assert self.Person.objects.count() == 0

    def test_reverse_delete_rule_cascade(self):
        """Ensure cascading deletion of referring documents from the database."""

        class BlogPost(Document):
            content = StringField()
            author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)

        BlogPost.drop_collection()

        me = self.Person(name="Test User")
        me.save()
        someoneelse = self.Person(name="Some-one Else")
        someoneelse.save()

        BlogPost(content="Watching TV", author=me).save()
        BlogPost(content="Chilling out", author=me).save()
        BlogPost(content="Pro Testing", author=someoneelse).save()

        assert 3 == BlogPost.objects.count()
        self.Person.objects(name="Test User").delete()
        assert 1 == BlogPost.objects.count()

    def test_reverse_delete_rule_cascade_on_abstract_document(self):
        """Ensure cascading deletion of referring documents from the database
        does not fail on abstract document.
        """

        class AbstractBlogPost(Document):
            meta = {"abstract": True}
            author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)

        class BlogPost(AbstractBlogPost):
            content = StringField()

        BlogPost.drop_collection()

        me = self.Person(name="Test User")
        me.save()
        someoneelse = self.Person(name="Some-one Else")
        someoneelse.save()

        BlogPost(content="Watching TV", author=me).save()
        BlogPost(content="Chilling out", author=me).save()
        BlogPost(content="Pro Testing", author=someoneelse).save()

        assert 3 == BlogPost.objects.count()
        self.Person.objects(name="Test User").delete()
        assert 1 == BlogPost.objects.count()

    def test_reverse_delete_rule_cascade_cycle(self):
        """Ensure reference cascading doesn't loop if reference graph isn't
        a tree
        """

        class Dummy(Document):
            reference = ReferenceField("self", reverse_delete_rule=CASCADE)

        base = Dummy().save()
        other = Dummy(reference=base).save()

        # make the two documents reference each other (a 2-cycle)
        base.reference = other
        base.save()

        base.delete()

        with pytest.raises(DoesNotExist):
            base.reload()
        with pytest.raises(DoesNotExist):
            other.reload()

    def test_reverse_delete_rule_cascade_complex_cycle(self):
        """Ensure reference cascading doesn't loop if reference graph isn't
        a tree
        """

        class Category(Document):
            name = StringField()

        class Dummy(Document):
            reference = ReferenceField("self", reverse_delete_rule=CASCADE)
            cat = ReferenceField(Category, reverse_delete_rule=CASCADE)

        cat = Category(name="cat").save()
        base = Dummy(cat=cat).save()
        other = Dummy(reference=base).save()
        other2 = Dummy(reference=other).save()

        base.reference = other
        base.save()

        # deleting the category must cascade through the whole cyclic graph
        cat.delete()

        with pytest.raises(DoesNotExist):
            base.reload()
        with pytest.raises(DoesNotExist):
            other.reload()
        with pytest.raises(DoesNotExist):
            other2.reload()

    def test_reverse_delete_rule_cascade_self_referencing(self):
        """Ensure self-referencing CASCADE deletes do not result in infinite
        loop
        """

        class Category(Document):
            name = StringField()
            parent = ReferenceField("self", reverse_delete_rule=CASCADE)

        Category.drop_collection()

        num_children = 3
        base = Category(name="Root")
        base.save()

        # Create a simple parent-child tree
        for i in range(num_children):
            child_name = "Child-%i" % i
            child = Category(name=child_name, parent=base)
            child.save()

            for i in range(num_children):
                child_child_name = "Child-Child-%i" % i
                child_child = Category(name=child_child_name, parent=child)
                child_child.save()

        tree_size = 1 + num_children + (num_children * num_children)
        assert tree_size == Category.objects.count()
        assert num_children == Category.objects(parent=base).count()

        # The delete should effectively wipe out the Category collection
        # without resulting in infinite parent-child cascade recursion
        base.delete()
        assert 0 == Category.objects.count()

    def test_reverse_delete_rule_nullify(self):
        """Ensure nullification of references to deleted documents."""

        class Category(Document):
            name = StringField()

        class BlogPost(Document):
            content = StringField()
            category = ReferenceField(Category, reverse_delete_rule=NULLIFY)

        BlogPost.drop_collection()
        Category.drop_collection()

        lameness = Category(name="Lameness")
        lameness.save()

        post = BlogPost(content="Watching TV", category=lameness)
        post.save()

        assert BlogPost.objects.count() == 1
        assert BlogPost.objects.first().category.name == "Lameness"
        Category.objects.delete()
        assert BlogPost.objects.count() == 1
        assert BlogPost.objects.first().category is None

    def test_reverse_delete_rule_nullify_on_abstract_document(self):
        """Ensure nullification of references to deleted documents when
        reference is on an abstract document.
        """

        class AbstractBlogPost(Document):
            meta = {"abstract": True}
            author = ReferenceField(self.Person, reverse_delete_rule=NULLIFY)

        class BlogPost(AbstractBlogPost):
            content = StringField()

        BlogPost.drop_collection()

        me = self.Person(name="Test User")
        me.save()
        someoneelse = self.Person(name="Some-one Else")
        someoneelse.save()

        BlogPost(content="Watching TV", author=me).save()

        assert BlogPost.objects.count() == 1
        assert BlogPost.objects.first().author == me
        self.Person.objects(name="Test User").delete()
        assert BlogPost.objects.count() == 1
        assert BlogPost.objects.first().author is None

    def test_reverse_delete_rule_deny(self):
        """Ensure deletion gets denied on documents that still have references
        to them.
        """

        class BlogPost(Document):
            content = StringField()
            author = ReferenceField(self.Person, reverse_delete_rule=DENY)

        BlogPost.drop_collection()
        self.Person.drop_collection()

        me = self.Person(name="Test User")
        me.save()

        post = BlogPost(content="Watching TV", author=me)
        post.save()

        with pytest.raises(OperationError):
            self.Person.objects.delete()

    def test_reverse_delete_rule_deny_on_abstract_document(self):
        """Ensure deletion gets denied on documents that still have references
        to them, when reference is on an abstract document.
        """

        class AbstractBlogPost(Document):
            meta = {"abstract": True}
            author = ReferenceField(self.Person, reverse_delete_rule=DENY)

        class BlogPost(AbstractBlogPost):
            content = StringField()

        BlogPost.drop_collection()

        me = self.Person(name="Test User")
        me.save()

        BlogPost(content="Watching TV", author=me).save()

        assert 1 == BlogPost.objects.count()
        with pytest.raises(OperationError):
            self.Person.objects.delete()

    def test_reverse_delete_rule_pull(self):
        """Ensure pulling of references to deleted documents."""

        class BlogPost(Document):
            content = StringField()
            authors = ListField(ReferenceField(self.Person, reverse_delete_rule=PULL))

        BlogPost.drop_collection()
        self.Person.drop_collection()

        me = self.Person(name="Test User")
        me.save()

        someoneelse = self.Person(name="Some-one Else")
        someoneelse.save()

        post = BlogPost(content="Watching TV", authors=[me, someoneelse])
        post.save()

        another = BlogPost(content="Chilling Out", authors=[someoneelse])
        another.save()

        someoneelse.delete()
        post.reload()
        another.reload()

        assert post.authors == [me]
        assert another.authors == []

    def test_reverse_delete_rule_pull_on_abstract_documents(self):
        """Ensure pulling of references to deleted documents when reference
        is defined on an abstract document..
        """

        class AbstractBlogPost(Document):
            meta = {"abstract": True}
            authors = ListField(ReferenceField(self.Person, reverse_delete_rule=PULL))

        class BlogPost(AbstractBlogPost):
            content = StringField()

        BlogPost.drop_collection()
        self.Person.drop_collection()

        me = self.Person(name="Test User")
        me.save()

        someoneelse = self.Person(name="Some-one Else")
        someoneelse.save()

        post = BlogPost(content="Watching TV", authors=[me, someoneelse])
        post.save()

        another = BlogPost(content="Chilling Out", authors=[someoneelse])
        another.save()

        someoneelse.delete()
        post.reload()
        another.reload()

        assert post.authors == [me]
        assert another.authors == []

    def test_delete_with_limits(self):
        """delete() on a sliced queryset removes only the sliced documents."""

        class Log(Document):
            pass

        Log.drop_collection()

        for i in range(10):
            Log().save()

        Log.objects()[3:5].delete()
        assert 8 == Log.objects.count()

    def test_delete_with_limit_handles_delete_rules(self):
        """Ensure cascading deletion of referring documents from the database."""

        class BlogPost(Document):
            content = StringField()
            author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)

        BlogPost.drop_collection()

        me = self.Person(name="Test User")
        me.save()
        someoneelse = self.Person(name="Some-one Else")
        someoneelse.save()

        BlogPost(content="Watching TV", author=me).save()
        BlogPost(content="Chilling out", author=me).save()
        BlogPost(content="Pro Testing", author=someoneelse).save()

        assert 3 == BlogPost.objects.count()
        self.Person.objects()[:1].delete()
        assert 1 == BlogPost.objects.count()

    def test_delete_edge_case_with_write_concern_0_return_None(self):
        """Return None if the delete operation is unacknowledged.

        If we use an unack'd write concern, we don't really know how many
        documents have been deleted.
        """
        p1 = self.Person(name="User Z", age=20).save()
        del_result = p1.delete(w=0)
        assert del_result is None

    def test_reference_field_find(self):
        """Ensure documents can be queried by a reference given as the
        referenced document, its pk, or the stringified pk (directly or via
        __in lists).
        """

        class BlogPost(Document):
            content = StringField()
            author = ReferenceField(self.Person)

        BlogPost.drop_collection()
        self.Person.drop_collection()

        me = self.Person(name="Test User").save()
        BlogPost(content="test 123", author=me).save()

        assert 1 == BlogPost.objects(author=me).count()
        assert 1 == BlogPost.objects(author=me.pk).count()
        assert 1 == BlogPost.objects(author="%s" % me.pk).count()

        assert 1 == BlogPost.objects(author__in=[me]).count()
        assert 1 == BlogPost.objects(author__in=[me.pk]).count()
        assert 1 == BlogPost.objects(author__in=["%s" % me.pk]).count()

    def test_reference_field_find_dbref(self):
        """Same reference-querying forms as test_reference_field_find, but
        with a DBRef-stored reference field (dbref=True).
        """

        class BlogPost(Document):
            content = StringField()
            author = ReferenceField(self.Person, dbref=True)

        BlogPost.drop_collection()
        self.Person.drop_collection()

        me = self.Person(name="Test User").save()
        BlogPost(content="test 123", author=me).save()

        assert 1 == BlogPost.objects(author=me).count()
        assert 1 == BlogPost.objects(author=me.pk).count()
        assert 1 == BlogPost.objects(author="%s" % me.pk).count()

        assert 1 == BlogPost.objects(author__in=[me]).count()
        assert 1 == BlogPost.objects(author__in=[me.pk]).count()
        assert 1 == BlogPost.objects(author__in=["%s" % me.pk]).count()

    def test_update_intfield_operator(self):
        """set/inc/dec atomic update operators on an IntField."""

        class BlogPost(Document):
            hits = IntField()

        BlogPost.drop_collection()

        post = BlogPost(hits=5)
        post.save()

        BlogPost.objects.update_one(set__hits=10)
        post.reload()
        assert post.hits == 10

        BlogPost.objects.update_one(inc__hits=1)
        post.reload()
        assert post.hits == 11

        BlogPost.objects.update_one(dec__hits=1)
        post.reload()
        assert post.hits == 10

        # Negative dec operator is equal to a positive inc operator
        BlogPost.objects.update_one(dec__hits=-1)
        post.reload()
        assert post.hits == 11
def test_update_decimalfield_operator(self): class BlogPost(Document): review = DecimalField() BlogPost.drop_collection() post = BlogPost(review=3.5) post.save() BlogPost.objects.update_one(inc__review=0.1) # test with floats post.reload() assert float(post.review) == 3.6 BlogPost.objects.update_one(dec__review=0.1) post.reload() assert float(post.review) == 3.5 BlogPost.objects.update_one(inc__review=Decimal(0.12)) # test with Decimal post.reload() assert float(post.review) == 3.62 BlogPost.objects.update_one(dec__review=Decimal(0.12)) post.reload() assert float(post.review) == 3.5 def test_update_decimalfield_operator_not_working_with_force_string(self): class BlogPost(Document): review = DecimalField(force_string=True) BlogPost.drop_collection() post = BlogPost(review=3.5) post.save() with pytest.raises(OperationError): BlogPost.objects.update_one(inc__review=0.1) # test with floats def test_update_listfield_operator(self): """Ensure that atomic updates work properly.""" class BlogPost(Document): tags = ListField(StringField()) BlogPost.drop_collection() post = BlogPost(tags=["test"]) post.save() # ListField operator BlogPost.objects.update(push__tags="mongo") post.reload() assert "mongo" in post.tags BlogPost.objects.update_one(push_all__tags=["db", "nosql"]) post.reload() assert "db" in post.tags assert "nosql" in post.tags tags = post.tags[:-1] BlogPost.objects.update(pop__tags=1) post.reload() assert post.tags == tags BlogPost.objects.update_one(add_to_set__tags="unique") BlogPost.objects.update_one(add_to_set__tags="unique") post.reload() assert post.tags.count("unique") == 1 BlogPost.drop_collection() def test_update_unset(self): class BlogPost(Document): title = StringField() BlogPost.drop_collection() post = BlogPost(title="garbage").save() assert post.title is not None BlogPost.objects.update_one(unset__title=1) post.reload() assert post.title is None pymongo_doc = BlogPost.objects.as_pymongo().first() assert "title" not in pymongo_doc def 
test_update_push_with_position(self): """Ensure that the 'push' update with position works properly.""" class BlogPost(Document): slug = StringField() tags = ListField(StringField()) BlogPost.drop_collection() post = BlogPost.objects.create(slug="test") BlogPost.objects.filter(id=post.id).update(push__tags="code") BlogPost.objects.filter(id=post.id).update(push__tags__0=["mongodb", "python"]) post.reload() assert post.tags == ["mongodb", "python", "code"] BlogPost.objects.filter(id=post.id).update(set__tags__2="java") post.reload() assert post.tags == ["mongodb", "python", "java"] # test push with singular value BlogPost.objects.filter(id=post.id).update(push__tags__0="scala") post.reload() assert post.tags == ["scala", "mongodb", "python", "java"] def test_update_push_list_of_list(self): """Ensure that the 'push' update operation works in the list of list""" class BlogPost(Document): slug = StringField() tags = ListField() BlogPost.drop_collection() post = BlogPost(slug="test").save() BlogPost.objects.filter(slug="test").update(push__tags=["value1", 123]) post.reload() assert post.tags == [["value1", 123]] def test_update_push_and_pull_add_to_set(self): """Ensure that the 'pull' update operation works correctly.""" class BlogPost(Document): slug = StringField() tags = ListField(StringField()) BlogPost.drop_collection() post = BlogPost(slug="test") post.save() BlogPost.objects.filter(id=post.id).update(push__tags="code") post.reload() assert post.tags == ["code"] BlogPost.objects.filter(id=post.id).update(push_all__tags=["mongodb", "code"]) post.reload() assert post.tags == ["code", "mongodb", "code"] BlogPost.objects(slug="test").update(pull__tags="code") post.reload() assert post.tags == ["mongodb"] BlogPost.objects(slug="test").update(pull_all__tags=["mongodb", "code"]) post.reload() assert post.tags == [] BlogPost.objects(slug="test").update( __raw__={"$addToSet": {"tags": {"$each": ["code", "mongodb", "code"]}}} ) post.reload() assert post.tags == ["code", 
"mongodb"] @requires_mongodb_gte_42 def test_aggregation_update(self): """Ensure that the 'aggregation_update' update works correctly.""" class BlogPost(Document): slug = StringField() tags = ListField(StringField()) BlogPost.drop_collection() post = BlogPost(slug="test") post.save() BlogPost.objects(slug="test").update( __raw__=[{"$set": {"slug": {"$concat": ["$slug", " ", "$slug"]}}}], ) post.reload() assert post.slug == "test test" BlogPost.objects(slug="test test").update( __raw__=[ {"$set": {"slug": {"$concat": ["$slug", " ", "it"]}}}, # test test it { "$set": {"slug": {"$concat": ["When", " ", "$slug"]}} }, # When test test it ], ) post.reload() assert post.slug == "When test test it" def test_combination_of_mongoengine_and__raw__(self): """Ensure that the '__raw__' update/query works in combination with mongoengine syntax correctly.""" class BlogPost(Document): slug = StringField() foo = StringField() tags = ListField(StringField()) BlogPost.drop_collection() post = BlogPost(slug="test", foo="bar") post.save() BlogPost.objects(slug="test").update( foo="baz", __raw__={"$set": {"slug": "test test"}}, ) post.reload() assert post.slug == "test test" assert post.foo == "baz" assert BlogPost.objects(foo="baz", __raw__={"slug": "test test"}).count() == 1 assert ( BlogPost.objects(foo__ne="bar", __raw__={"slug": {"$ne": "test"}}).count() == 1 ) assert ( BlogPost.objects(foo="baz", __raw__={"slug": {"$ne": "test test"}}).count() == 0 ) assert ( BlogPost.objects(foo__ne="baz", __raw__={"slug": "test test"}).count() == 0 ) assert ( BlogPost.objects( foo__ne="baz", __raw__={"slug": {"$ne": "test test"}} ).count() == 0 ) def test_add_to_set_each(self): class Item(Document): name = StringField(required=True) description = StringField(max_length=50) parents = ListField(ReferenceField("self")) Item.drop_collection() item = Item(name="test item").save() parent_1 = Item(name="parent 1").save() parent_2 = Item(name="parent 2").save() item.update(add_to_set__parents=[parent_1, 
parent_2, parent_1]) item.reload() assert [parent_1, parent_2] == item.parents def test_pull_nested(self): class Collaborator(EmbeddedDocument): user = StringField() def __unicode__(self): return "%s" % self.user class Site(Document): name = StringField(max_length=75, unique=True, required=True) collaborators = ListField(EmbeddedDocumentField(Collaborator)) Site.drop_collection() c = Collaborator(user="Esteban") s = Site(name="test", collaborators=[c]).save() Site.objects(id=s.id).update_one(pull__collaborators__user="Esteban") assert Site.objects.first().collaborators == [] with pytest.raises(InvalidQueryError): Site.objects(id=s.id).update_one(pull_all__collaborators__user=["Ross"]) def test_pull_from_nested_embedded(self): class User(EmbeddedDocument): name = StringField() def __unicode__(self): return "%s" % self.name class Collaborator(EmbeddedDocument): helpful = ListField(EmbeddedDocumentField(User)) unhelpful = ListField(EmbeddedDocumentField(User)) class Site(Document): name = StringField(max_length=75, unique=True, required=True) collaborators = EmbeddedDocumentField(Collaborator) Site.drop_collection() c = User(name="Esteban") f = User(name="Frank") s = Site( name="test", collaborators=Collaborator(helpful=[c], unhelpful=[f]) ).save() Site.objects(id=s.id).update_one(pull__collaborators__helpful=c) assert Site.objects.first().collaborators["helpful"] == [] Site.objects(id=s.id).update_one( pull__collaborators__unhelpful={"name": "Frank"} ) assert Site.objects.first().collaborators["unhelpful"] == [] with pytest.raises(InvalidQueryError): Site.objects(id=s.id).update_one( pull_all__collaborators__helpful__name=["Ross"] ) def test_pull_from_nested_embedded_using_in_nin(self): """Ensure that the 'pull' update operation works on embedded documents using 'in' and 'nin' operators.""" class User(EmbeddedDocument): name = StringField() def __unicode__(self): return "%s" % self.name class Collaborator(EmbeddedDocument): helpful = 
ListField(EmbeddedDocumentField(User)) unhelpful = ListField(EmbeddedDocumentField(User)) class Site(Document): name = StringField(max_length=75, unique=True, required=True) collaborators = EmbeddedDocumentField(Collaborator) Site.drop_collection() a = User(name="Esteban") b = User(name="Frank") x = User(name="Harry") y = User(name="John") s = Site( name="test", collaborators=Collaborator(helpful=[a, b], unhelpful=[x, y]) ).save() Site.objects(id=s.id).update_one( pull__collaborators__helpful__name__in=["Esteban"] ) # Pull a assert Site.objects.first().collaborators["helpful"] == [b] Site.objects(id=s.id).update_one( pull__collaborators__unhelpful__name__nin=["John"] ) # Pull x assert Site.objects.first().collaborators["unhelpful"] == [y] def test_pull_from_nested_mapfield(self): class Collaborator(EmbeddedDocument): user = StringField() def __unicode__(self): return "%s" % self.user class Site(Document): name = StringField(max_length=75, unique=True, required=True) collaborators = MapField(ListField(EmbeddedDocumentField(Collaborator))) Site.drop_collection() c = Collaborator(user="Esteban") f = Collaborator(user="Frank") s = Site(name="test", collaborators={"helpful": [c], "unhelpful": [f]}) s.save() Site.objects(id=s.id).update_one(pull__collaborators__helpful__user="Esteban") assert Site.objects.first().collaborators["helpful"] == [] Site.objects(id=s.id).update_one( pull__collaborators__unhelpful={"user": "Frank"} ) assert Site.objects.first().collaborators["unhelpful"] == [] with pytest.raises(InvalidQueryError): Site.objects(id=s.id).update_one( pull_all__collaborators__helpful__user=["Ross"] ) def test_pull_in_genericembedded_field(self): class Foo(EmbeddedDocument): name = StringField() class Bar(Document): foos = ListField(GenericEmbeddedDocumentField(choices=[Foo])) Bar.drop_collection() foo = Foo(name="bar") bar = Bar(foos=[foo]).save() Bar.objects(id=bar.id).update(pull__foos=foo) bar.reload() assert len(bar.foos) == 0 def 
    def test_update_one_check_return_with_full_result(self):
        """update_one returns the modified count by default, and a pymongo
        UpdateResult when full_result=True."""

        class BlogTag(Document):
            name = StringField(required=True)

        BlogTag.drop_collection()
        BlogTag(name="garbage").save()

        default_update = BlogTag.objects.update_one(name="new")
        assert default_update == 1

        full_result_update = BlogTag.objects.update_one(name="new", full_result=True)
        assert isinstance(full_result_update, UpdateResult)

    def test_update_one_pop_generic_reference(self):
        """pop works on a list of references."""

        class BlogTag(Document):
            name = StringField(required=True)

        class BlogPost(Document):
            slug = StringField()
            tags = ListField(ReferenceField(BlogTag), required=True)

        BlogPost.drop_collection()
        BlogTag.drop_collection()

        tag_1 = BlogTag(name="code")
        tag_1.save()
        tag_2 = BlogTag(name="mongodb")
        tag_2.save()

        post = BlogPost(slug="test", tags=[tag_1])
        post.save()

        post = BlogPost(slug="test-2", tags=[tag_1, tag_2])
        post.save()
        assert len(post.tags) == 2

        # pop with -1 removes the first element.
        BlogPost.objects(slug="test-2").update_one(pop__tags=-1)

        post.reload()
        assert len(post.tags) == 1

        BlogPost.drop_collection()
        BlogTag.drop_collection()

    def test_editting_embedded_objects(self):
        """Positional set (tags__0__name) and pop work on embedded documents."""

        class BlogTag(EmbeddedDocument):
            name = StringField(required=True)

        class BlogPost(Document):
            slug = StringField()
            tags = ListField(EmbeddedDocumentField(BlogTag), required=True)

        BlogPost.drop_collection()

        tag_1 = BlogTag(name="code")
        tag_2 = BlogTag(name="mongodb")

        post = BlogPost(slug="test", tags=[tag_1])
        post.save()

        post = BlogPost(slug="test-2", tags=[tag_1, tag_2])
        post.save()
        assert len(post.tags) == 2

        BlogPost.objects(slug="test-2").update_one(set__tags__0__name="python")
        post.reload()
        assert post.tags[0].name == "python"

        BlogPost.objects(slug="test-2").update_one(pop__tags=-1)
        post.reload()
        assert len(post.tags) == 1

        BlogPost.drop_collection()

    def test_set_list_embedded_documents(self):
        """set with the positional operator ($) and with a whole new list."""

        class Author(EmbeddedDocument):
            name = StringField()

        class Message(Document):
            title = StringField()
            authors = ListField(EmbeddedDocumentField("Author"))

        Message.drop_collection()

        message = Message(title="hello", authors=[Author(name="Harry")])
        message.save()

        # __S is MongoEngine's spelling of the positional "$" operator.
        Message.objects(authors__name="Harry").update_one(
            set__authors__S=Author(name="Ross")
        )

        message = message.reload()
        assert message.authors[0].name == "Ross"

        Message.objects(authors__name="Ross").update_one(
            set__authors=[
                Author(name="Harry"),
                Author(name="Ross"),
                Author(name="Adam"),
            ]
        )

        message = message.reload()
        assert message.authors[0].name == "Harry"
        assert message.authors[1].name == "Ross"
        assert message.authors[2].name == "Adam"

    def test_set_generic_embedded_documents(self):
        """set a GenericEmbeddedDocumentField value via update(upsert=True)."""

        class Bar(EmbeddedDocument):
            name = StringField()

        class User(Document):
            username = StringField()
            bar = GenericEmbeddedDocumentField(choices=[Bar])

        User.drop_collection()

        User(username="abc").save()
        User.objects(username="abc").update(set__bar=Bar(name="test"), upsert=True)

        user = User.objects(username="abc").first()
        assert user.bar.name == "test"

    def test_reload_embedded_docs_instance(self):
        """After reload(), an embedded document keeps its parent _instance."""

        class SubDoc(EmbeddedDocument):
            val = IntField()

        class Doc(Document):
            embedded = EmbeddedDocumentField(SubDoc)

        doc = Doc(embedded=SubDoc(val=0)).save()
        doc.reload()

        assert doc.pk == doc.embedded._instance.pk

    def test_reload_list_embedded_docs_instance(self):
        """After reload(), embedded documents in a list keep their parent _instance."""

        class SubDoc(EmbeddedDocument):
            val = IntField()

        class Doc(Document):
            embedded = ListField(EmbeddedDocumentField(SubDoc))

        doc = Doc(embedded=[SubDoc(val=0)]).save()
        doc.reload()

        assert doc.pk == doc.embedded[0]._instance.pk

    def test_order_by(self):
        """Ensure that QuerySets may be ordered."""
        self.Person(name="User B", age=40).save()
        self.Person(name="User A", age=20).save()
        self.Person(name="User C", age=30).save()

        names = [p.name for p in self.Person.objects.order_by("-age")]
        assert names == ["User B", "User C", "User A"]

        names = [p.name for p in self.Person.objects.order_by("+age")]
        assert names == ["User A", "User C", "User B"]

        # No prefix defaults to ascending.
        names = [p.name for p in self.Person.objects.order_by("age")]
        assert names == ["User A", "User C", "User B"]

        ages = [p.age for p in self.Person.objects.order_by("-name")]
        assert ages == [30, 40, 20]

        # Empty order_by() / order_by("") clears ordering -> insertion order.
        ages = [p.age for p in self.Person.objects.order_by()]
        assert ages == [40, 20, 30]

        ages = [p.age for p in self.Person.objects.order_by("")]
        assert ages == [40, 20, 30]

    def test_order_by_optional(self):
        """Documents with a None sort key come first in ascending order."""

        class BlogPost(Document):
            title = StringField()
            published_date = DateTimeField(required=False)

        BlogPost.drop_collection()

        blog_post_3 = BlogPost.objects.create(
            title="Blog Post #3", published_date=datetime.datetime(2010, 1, 6, 0, 0, 0)
        )
        blog_post_2 = BlogPost.objects.create(
            title="Blog Post #2", published_date=datetime.datetime(2010, 1, 5, 0, 0, 0)
        )
        blog_post_4 = BlogPost.objects.create(
            title="Blog Post #4", published_date=datetime.datetime(2010, 1, 7, 0, 0, 0)
        )
        blog_post_1 = BlogPost.objects.create(title="Blog Post #1", published_date=None)

        expected = [blog_post_1, blog_post_2, blog_post_3, blog_post_4]
        self.assertSequence(BlogPost.objects.order_by("published_date"), expected)
        self.assertSequence(BlogPost.objects.order_by("+published_date"), expected)

        expected.reverse()
        self.assertSequence(BlogPost.objects.order_by("-published_date"), expected)

    def test_order_by_list(self):
        """Ordering by multiple keys uses later keys as tie-breakers."""

        class BlogPost(Document):
            title = StringField()
            published_date = DateTimeField(required=False)

        BlogPost.drop_collection()

        blog_post_1 = BlogPost.objects.create(
            title="A", published_date=datetime.datetime(2010, 1, 6, 0, 0, 0)
        )
        blog_post_2 = BlogPost.objects.create(
            title="B", published_date=datetime.datetime(2010, 1, 6, 0, 0, 0)
        )
        blog_post_3 = BlogPost.objects.create(
            title="C", published_date=datetime.datetime(2010, 1, 7, 0, 0, 0)
        )

        qs = BlogPost.objects.order_by("published_date", "title")
        expected = [blog_post_1, blog_post_2, blog_post_3]
        self.assertSequence(qs, expected)

        qs = BlogPost.objects.order_by("-published_date", "-title")
        expected.reverse()
        self.assertSequence(qs, expected)

    def test_order_by_chaining(self):
        """Ensure that an order_by query chains properly and allows .only()"""
        self.Person(name="User B", age=40).save()
        self.Person(name="User A", age=20).save()
        self.Person(name="User C", age=30).save()

        only_age = self.Person.objects.order_by("-age").only("age")

        names = [p.name for p in only_age]
        ages = [p.age for p in only_age]

        # The .only('age') clause should mean that all names are None
        assert names == [None, None, None]
        assert ages == [40, 30, 20]

        qs = self.Person.objects.all().order_by("-age")
        qs = qs.limit(10)
        ages = [p.age for p in qs]
        assert ages == [40, 30, 20]

        qs = self.Person.objects.all().limit(10)
        qs = qs.order_by("-age")

        ages = [p.age for p in qs]
        assert ages == [40, 30, 20]

        qs = self.Person.objects.all().skip(0)
        qs = qs.order_by("-age")
        ages = [p.age for p in qs]
        assert ages == [40, 30, 20]

    def test_order_by_using_raw(self):
        """order_by(__raw__=...) accepts pymongo-style sort specifications."""
        person_a = self.Person(name="User A", age=20)
        person_a.save()
        person_b = self.Person(name="User B", age=30)
        person_b.save()
        person_c = self.Person(name="User B", age=25)
        person_c.save()
        person_d = self.Person(name="User C", age=40)
        person_d.save()

        qs = self.Person.objects.order_by(__raw__=[("name", pymongo.DESCENDING)])
        assert qs._ordering == [("name", pymongo.DESCENDING)]
        names = [p.name for p in qs]
        assert names == ["User C", "User B", "User B", "User A"]

        names = [
            (p.name, p.age)
            for p in self.Person.objects.order_by(__raw__=[("name", pymongo.ASCENDING)])
        ]
        assert names == [("User A", 20), ("User B", 30), ("User B", 25), ("User C", 40)]

        if PYMONGO_VERSION >= (4, 4):
            # Pymongo >= 4.4 allow to mix single key with tuples inside the list
            qs = self.Person.objects.order_by(
                __raw__=["name", ("age", pymongo.ASCENDING)]
            )
            names = [(p.name, p.age) for p in qs]
            assert names == [
                ("User A", 20),
                ("User B", 25),
                ("User B", 30),
                ("User C", 40),
            ]

    def test_order_by_using_raw_and_keys_raises_exception(self):
        """Mixing key-name ordering with __raw__ ordering is rejected."""
        with pytest.raises(OperationError):
            self.Person.objects.order_by("-name", __raw__=[("age", pymongo.ASCENDING)])

    def test_confirm_order_by_reference_wont_work(self):
        """Ordering by reference is not possible. Use map / reduce.. or
        denormalise"""

        class Author(Document):
            author = ReferenceField(self.Person)

        Author.drop_collection()

        person_a = self.Person(name="User A", age=20)
        person_a.save()
        person_b = self.Person(name="User B", age=40)
        person_b.save()
        person_c = self.Person(name="User C", age=30)
        person_c.save()

        Author(author=person_a).save()
        Author(author=person_b).save()
        Author(author=person_c).save()

        # The "-author__age" key has no effect: results stay in insertion order.
        names = [a.author.name for a in Author.objects.order_by("-author__age")]
        assert names == ["User A", "User B", "User C"]

    def test_comment(self):
        """Make sure adding a comment to the query gets added to the query"""
        MONGO_VER = self.mongodb_version
        _, CMD_QUERY_KEY = get_key_compat(MONGO_VER)
        QUERY_KEY = "filter"
        COMMENT_KEY = "comment"

        class User(Document):
            age = IntField()

        # Verify via the profiler that the comment reaches the server command,
        # regardless of whether .comment() comes before or after .filter().
        with db_ops_tracker() as q:
            User.objects.filter(age__gte=18).comment("looking for an adult").first()
            User.objects.comment("looking for an adult").filter(age__gte=18).first()

        ops = q.get_ops()
        assert len(ops) == 2
        for op in ops:
            assert op[CMD_QUERY_KEY][QUERY_KEY] == {"age": {"$gte": 18}}
            assert op[CMD_QUERY_KEY][COMMENT_KEY] == "looking for an adult"

    def test_map_reduce(self):
        """Ensure map/reduce is both mapping and reducing."""

        class BlogPost(Document):
            title = StringField()
            tags = ListField(StringField(), db_field="post-tag-list")

        BlogPost.drop_collection()

        BlogPost(title="Post #1", tags=["music", "film", "print"]).save()
        BlogPost(title="Post #2", tags=["music", "film"]).save()
        BlogPost(title="Post #3", tags=["film", "photography"]).save()

        # "~tags" is substituted with the actual db_field name by MongoEngine.
        map_f = """
            function() {
                this[~tags].forEach(function(tag) {
                    emit(tag, 1);
                });
            }
        """

        reduce_f = """
            function(key, values) {
                var total = 0;
                for(var i=0; i<values.length; i++) {
                    total += values[i];
                }
                return total;
            }
        """

        # run a map/reduce operation spanning all posts
        results = BlogPost.objects.map_reduce(map_f, reduce_f, "myresults")
        results = list(results)
        assert len(results) == 4

        music = list(filter(lambda r: r.key == "music", results))[0]
        assert music.value == 2
        film = list(filter(lambda r: r.key == "film", results))[0]
        assert film.value == 3

        BlogPost.drop_collection()

    def test_map_reduce_with_custom_object_ids(self):
        """Ensure that QuerySet.map_reduce works properly with custom
        primary keys.
        """

        class BlogPost(Document):
            title = StringField(primary_key=True)
            tags = ListField(StringField())

        BlogPost.drop_collection()

        post1 = BlogPost(title="Post #1", tags=["mongodb", "mongoengine"])
        post2 = BlogPost(title="Post #2", tags=["django", "mongodb"])
        post3 = BlogPost(title="Post #3", tags=["hitchcock films"])

        post1.save()
        post2.save()
        post3.save()

        assert BlogPost._fields["title"].db_field == "_id"
        assert BlogPost._meta["id_field"] == "title"

        map_f = """
            function() {
                emit(this._id, 1);
            }
        """

        # reduce to a list of tag ids and counts
        reduce_f = """
            function(key, values) {
                var total = 0;
                for(var i=0; i<values.length; i++) {
                    total += values[i];
                }
                return total;
            }
        """

        results = BlogPost.objects.order_by("_id").map_reduce(
            map_f, reduce_f, "myresults2"
        )
        results = list(results)

        assert len(results) == 3
        assert results[0].object.id == post1.id
        assert results[1].object.id == post2.id
        assert results[2].object.id == post3.id

        BlogPost.drop_collection()

    def test_map_reduce_custom_output(self):
        """
        Test map/reduce custom output
        """

        class Family(Document):
            id = IntField(primary_key=True)
            log = StringField()

        class Person(Document):
            id = IntField(primary_key=True)
            name = StringField()
            age = IntField()
            family = ReferenceField(Family)

        Family.drop_collection()
        Person.drop_collection()

        # creating first family
        f1 = Family(id=1, log="Trav 02 de Julho")
        f1.save()

        # persons of first family
        Person(id=1, family=f1, name="Wilson Jr", age=21).save()
        Person(id=2, family=f1, name="Wilson Father", age=45).save()
        Person(id=3, family=f1, name="Eliana Costa", age=40).save()
        Person(id=4, family=f1, name="Tayza Mariana", age=17).save()

        # creating second family
        f2 = Family(id=2, log="Av prof frasc brunno")
        f2.save()

        # persons of second family
        Person(id=5, family=f2, name="Isabella Luanna", age=16).save()
        Person(id=6, family=f2, name="Sandra Mara", age=36).save()
        Person(id=7, family=f2, name="Igor Gabriel", age=10).save()

        # creating third family
        f3 = Family(id=3, log="Av brazil")
        f3.save()

        # persons of thrird family
        Person(id=8, family=f3, name="Arthur WA", age=30).save()
        Person(id=9, family=f3, name="Paula Leonel", age=25).save()

        # executing join map/reduce
        map_person = """
            function () {
                emit(this.family, {
                    totalAge: this.age,
                    persons: [{
                        name: this.name,
                        age: this.age
                    }]});
            }
        """

        map_family = """
            function () {
                emit(this._id, {
                    totalAge: 0,
                    persons: []
                });
            }
        """

        reduce_f = """
            function (key, values) {
                var family = {persons: [], totalAge: 0};

                values.forEach(function(value) {
                    if (value.persons) {
                        value.persons.forEach(function (person) {
                            family.persons.push(person);
                            family.totalAge += person.age;
                        });
                        family.persons.sort((a, b) => (a.age > b.age))
                    }
                });
                return family;
            }
        """

        # First pass seeds the "family_map" collection with empty families,
        # second pass reduces persons into it ("reduce" output mode).
        cursor = Family.objects.map_reduce(
            map_f=map_family,
            reduce_f=reduce_f,
            output={"replace": "family_map", "db_alias": "test2"},
        )

        # start a map/reduce
        next(cursor)

        results = Person.objects.map_reduce(
            map_f=map_person,
            reduce_f=reduce_f,
            output={"reduce": "family_map", "db_alias": "test2"},
        )

        results = list(results)
        collection = get_db("test2").family_map

        assert collection.find_one({"_id": 1}) == {
            "_id": 1,
            "value": {
                "persons": [
                    {"age": 17, "name": "Tayza Mariana"},
                    {"age": 21, "name": "Wilson Jr"},
                    {"age": 40, "name": "Eliana Costa"},
                    {"age": 45, "name": "Wilson Father"},
                ],
                "totalAge": 123,
            },
        }

        assert collection.find_one({"_id": 2}) == {
            "_id": 2,
            "value": {
                "persons": [
                    {"age": 10, "name": "Igor Gabriel"},
                    {"age": 16, "name": "Isabella Luanna"},
                    {"age": 36, "name": "Sandra Mara"},
                ],
                "totalAge": 62,
            },
        }

        assert collection.find_one({"_id": 3}) == {
            "_id": 3,
            "value": {
                "persons": [
                    {"age": 25, "name": "Paula Leonel"},
                    {"age": 30, "name": "Arthur WA"},
                ],
                "totalAge": 55,
            },
        }

    def test_map_reduce_finalize(self):
        """Ensure that map, reduce, and finalize run and introduce "scope"
        by simulating "hotness" ranking with Reddit algorithm.
        """
        from time import mktime

        class Link(Document):
            title = StringField(db_field="bpTitle")
            up_votes = IntField()
            down_votes = IntField()
            submitted = DateTimeField(db_field="sTime")

        Link.drop_collection()

        now = datetime.datetime.utcnow()

        # Note: Test data taken from a custom Reddit homepage on
        # Fri, 12 Feb 2010 14:36:00 -0600. Link ordering should
        # reflect order of insertion below, but is not influenced
        # by insertion order.
        Link(
            title="Google Buzz auto-followed a woman's abusive ex ...",
            up_votes=1079,
            down_votes=553,
            submitted=now - datetime.timedelta(hours=4),
        ).save()
        Link(
            title="We did it! Barbie is a computer engineer.",
            up_votes=481,
            down_votes=124,
            submitted=now - datetime.timedelta(hours=2),
        ).save()
        Link(
            title="This Is A Mosquito Getting Killed By A Laser",
            up_votes=1446,
            down_votes=530,
            submitted=now - datetime.timedelta(hours=13),
        ).save()
        Link(
            title="Arabic flashcards land physics student in jail.",
            up_votes=215,
            down_votes=105,
            submitted=now - datetime.timedelta(hours=6),
        ).save()
        Link(
            title="The Burger Lab: Presenting, the Flood Burger",
            up_votes=48,
            down_votes=17,
            submitted=now - datetime.timedelta(hours=5),
        ).save()
        Link(
            title="How to see polarization with the naked eye",
            up_votes=74,
            down_votes=13,
            submitted=now - datetime.timedelta(hours=10),
        ).save()

        map_f = """
            function() {
                emit(this[~id], {up_delta: this[~up_votes] - this[~down_votes],
                                sub_date: this[~submitted].getTime() / 1000})
            }
        """

        # NOTE(review): the JS below uses "x = 0" (assignment) where "x == 0"
        # looks intended; with x == 0 the branch never fires — confirm upstream.
        reduce_f = """
            function(key, values) {
                data = values[0];

                x = data.up_delta;

                // calculate time diff between reddit epoch and submission
                sec_since_epoch = data.sub_date - reddit_epoch;

                // calculate 'Y'
                if(x > 0) {
                    y = 1;
                } else if (x = 0) {
                    y = 0;
                } else {
                    y = -1;
                }

                // calculate 'Z', the maximal value
                if(Math.abs(x) >= 1) {
                    z = Math.abs(x);
                } else {
                    z = 1;
                }

                return {x: x, y: y, z: z, t_s: sec_since_epoch};
            }
        """

        finalize_f = """
            function(key, value) {
                // f(sec_since_epoch,y,z) =
                //                    log10(z) + ((y*sec_since_epoch) / 45000)
                z_10 = Math.log(value.z) / Math.log(10);
                weight = z_10 + ((value.y * value.t_s) / 45000);
                return weight;
            }
        """

        # provide the reddit epoch (used for ranking) as a variable available
        # to all phases of the map/reduce operation: map, reduce, and finalize.
        reddit_epoch = mktime(datetime.datetime(2005, 12, 8, 7, 46, 43).timetuple())
        scope = {"reddit_epoch": reddit_epoch}

        # run a map/reduce operation across all links. ordering is set
        # to "-value", which orders the "weight" value returned from
        # "finalize_f" in descending order.
        results = Link.objects.order_by("-value")
        results = results.map_reduce(
            map_f, reduce_f, "myresults", finalize_f=finalize_f, scope=scope
        )
        results = list(results)

        # assert troublesome Buzz article is ranked 1st
        assert results[0].object.title.startswith("Google Buzz")

        # assert laser vision is ranked last
        assert results[-1].object.title.startswith("How to see")

        Link.drop_collection()

    def test_item_frequencies(self):
        """Ensure that item frequencies are properly generated from lists."""

        class BlogPost(Document):
            hits = IntField()
            tags = ListField(StringField(), db_field="blogTags")

        BlogPost.drop_collection()

        BlogPost(hits=1, tags=["music", "film", "actors", "watch"]).save()
        BlogPost(hits=2, tags=["music", "watch"]).save()
        BlogPost(hits=2, tags=["music", "actors"]).save()

        # Each variant below deliberately re-defines test_assertions as a local
        # helper for the scenario that follows it.
        def test_assertions(f):
            f = {key: int(val) for key, val in f.items()}
            assert {"music", "film", "actors", "watch"} == set(f.keys())
            assert f["music"] == 3
            assert f["actors"] == 2
            assert f["watch"] == 2
            assert f["film"] == 1

        exec_js = BlogPost.objects.item_frequencies("tags")
        map_reduce = BlogPost.objects.item_frequencies("tags", map_reduce=True)
        test_assertions(exec_js)
        test_assertions(map_reduce)

        # Ensure query is taken into account
        def test_assertions(f):
            f = {key: int(val) for key, val in f.items()}
            assert {"music", "actors", "watch"} == set(f.keys())
            assert f["music"] == 2
            assert f["actors"] == 1
            assert f["watch"] == 1
BlogPost.objects(hits__gt=1).item_frequencies("tags") map_reduce = BlogPost.objects(hits__gt=1).item_frequencies( "tags", map_reduce=True ) test_assertions(exec_js) test_assertions(map_reduce) # Check that normalization works def test_assertions(f): assert round(abs(f["music"] - 3.0 / 8.0), 7) == 0 assert round(abs(f["actors"] - 2.0 / 8.0), 7) == 0 assert round(abs(f["watch"] - 2.0 / 8.0), 7) == 0 assert round(abs(f["film"] - 1.0 / 8.0), 7) == 0 exec_js = BlogPost.objects.item_frequencies("tags", normalize=True) map_reduce = BlogPost.objects.item_frequencies( "tags", normalize=True, map_reduce=True ) test_assertions(exec_js) test_assertions(map_reduce) # Check item_frequencies works for non-list fields def test_assertions(f): assert {1, 2} == set(f.keys()) assert f[1] == 1 assert f[2] == 2 exec_js = BlogPost.objects.item_frequencies("hits") map_reduce = BlogPost.objects.item_frequencies("hits", map_reduce=True) test_assertions(exec_js) test_assertions(map_reduce) BlogPost.drop_collection() def test_item_frequencies_on_embedded(self): """Ensure that item frequencies are properly generated from lists.""" class Phone(EmbeddedDocument): number = StringField() class Person(Document): name = StringField() phone = EmbeddedDocumentField(Phone) Person.drop_collection() doc = Person(name="Guido") doc.phone = Phone(number="62-3331-1656") doc.save() doc = Person(name="Marr") doc.phone = Phone(number="62-3331-1656") doc.save() doc = Person(name="WP Junior") doc.phone = Phone(number="62-3332-1656") doc.save() def test_assertions(f): f = {key: int(val) for key, val in f.items()} assert {"62-3331-1656", "62-3332-1656"} == set(f.keys()) assert f["62-3331-1656"] == 2 assert f["62-3332-1656"] == 1 exec_js = Person.objects.item_frequencies("phone.number") map_reduce = Person.objects.item_frequencies("phone.number", map_reduce=True) test_assertions(exec_js) test_assertions(map_reduce) # Ensure query is taken into account def test_assertions(f): f = {key: int(val) for key, val in 
f.items()} assert {"62-3331-1656"} == set(f.keys()) assert f["62-3331-1656"] == 2 exec_js = Person.objects(phone__number="62-3331-1656").item_frequencies( "phone.number" ) map_reduce = Person.objects(phone__number="62-3331-1656").item_frequencies( "phone.number", map_reduce=True ) test_assertions(exec_js) test_assertions(map_reduce) # Check that normalization works def test_assertions(f): assert f["62-3331-1656"] == 2.0 / 3.0 assert f["62-3332-1656"] == 1.0 / 3.0 exec_js = Person.objects.item_frequencies("phone.number", normalize=True) map_reduce = Person.objects.item_frequencies( "phone.number", normalize=True, map_reduce=True ) test_assertions(exec_js) test_assertions(map_reduce) def test_item_frequencies_null_values(self): class Person(Document): name = StringField() city = StringField() Person.drop_collection() Person(name="Wilson Snr", city="CRB").save() Person(name="Wilson Jr").save() freq = Person.objects.item_frequencies("city") assert freq == {"CRB": 1.0, None: 1.0} freq = Person.objects.item_frequencies("city", normalize=True) assert freq == {"CRB": 0.5, None: 0.5} freq = Person.objects.item_frequencies("city", map_reduce=True) assert freq == {"CRB": 1.0, None: 1.0} freq = Person.objects.item_frequencies("city", normalize=True, map_reduce=True) assert freq == {"CRB": 0.5, None: 0.5} @requires_mongodb_lt_42 def test_item_frequencies_with_null_embedded(self): class Data(EmbeddedDocument): name = StringField() class Extra(EmbeddedDocument): tag = StringField() class Person(Document): data = EmbeddedDocumentField(Data, required=True) extra = EmbeddedDocumentField(Extra) Person.drop_collection() p = Person() p.data = Data(name="Wilson Jr") p.save() p = Person() p.data = Data(name="Wesley") p.extra = Extra(tag="friend") p.save() ot = Person.objects.item_frequencies("extra.tag", map_reduce=False) assert ot == {None: 1.0, "friend": 1.0} ot = Person.objects.item_frequencies("extra.tag", map_reduce=True) assert ot == {None: 1.0, "friend": 1.0} 
# 0 is falsy but must still appear as a frequency key (not be dropped).
@requires_mongodb_lt_42
def test_item_frequencies_with_0_values(self):
    class Test(Document):
        val = IntField()

    Test.drop_collection()
    t = Test()
    t.val = 0
    t.save()

    ot = Test.objects.item_frequencies("val", map_reduce=True)
    assert ot == {0: 1}
    ot = Test.objects.item_frequencies("val", map_reduce=False)
    assert ot == {0: 1}

# False is falsy but must still appear as a frequency key.
@requires_mongodb_lt_42
def test_item_frequencies_with_False_values(self):
    class Test(Document):
        val = BooleanField()

    Test.drop_collection()
    t = Test()
    t.val = False
    t.save()

    ot = Test.objects.item_frequencies("val", map_reduce=True)
    assert ot == {False: 1}
    ot = Test.objects.item_frequencies("val", map_reduce=False)
    assert ot == {False: 1}

# Normalized frequencies are fractions of the total document count (70).
@requires_mongodb_lt_42
def test_item_frequencies_normalize(self):
    class Test(Document):
        val = IntField()

    Test.drop_collection()

    for _ in range(50):
        Test(val=1).save()

    for _ in range(20):
        Test(val=2).save()

    freqs = Test.objects.item_frequencies("val", map_reduce=False, normalize=True)
    assert freqs == {1: 50.0 / 70, 2: 20.0 / 70}

    freqs = Test.objects.item_frequencies("val", map_reduce=True, normalize=True)
    assert freqs == {1: 50.0 / 70, 2: 20.0 / 70}

def test_average(self):
    """Ensure that field can be averaged correctly."""
    self.Person(name="person", age=0).save()
    assert int(self.Person.objects.average("age")) == 0

    ages = [23, 54, 12, 94, 27]
    for i, age in enumerate(ages):
        self.Person(name="test%s" % i, age=age).save()

    avg = float(sum(ages)) / (len(ages) + 1)  # take into account the 0
    assert round(abs(int(self.Person.objects.average("age")) - avg), 7) == 0

    # A document with no age must not change the average.
    self.Person(name="ageless person").save()
    assert int(self.Person.objects.average("age")) == avg

    # dot notation
    self.Person(name="person meta", person_meta=self.PersonMeta(weight=0)).save()
    assert (
        round(abs(int(self.Person.objects.average("person_meta.weight")) - 0), 7)
        == 0
    )

    for i, weight in enumerate(ages):
        self.Person(
            name=f"test meta{i}", person_meta=self.PersonMeta(weight=weight)
        ).save()

    assert (
        round(abs(int(self.Person.objects.average("person_meta.weight")) - avg), 7)
        == 0
    )

    self.Person(name="test meta none").save()
    assert int(self.Person.objects.average("person_meta.weight")) == avg

    # test summing over a filtered queryset
    over_50 = [a for a in ages if a >= 50]
    avg = float(sum(over_50)) / len(over_50)
    assert self.Person.objects.filter(age__gte=50).average("age") == avg

def test_sum(self):
    """Ensure that field can be summed over correctly."""
    ages = [23, 54, 12, 94, 27]
    for i, age in enumerate(ages):
        self.Person(name="test%s" % i, age=age).save()

    assert self.Person.objects.sum("age") == sum(ages)

    # Documents without the field must not affect the sum.
    self.Person(name="ageless person").save()
    assert self.Person.objects.sum("age") == sum(ages)

    for i, age in enumerate(ages):
        self.Person(
            name="test meta%s" % i, person_meta=self.PersonMeta(weight=age)
        ).save()

    assert self.Person.objects.sum("person_meta.weight") == sum(ages)

    self.Person(name="weightless person").save()
    assert self.Person.objects.sum("age") == sum(ages)

    # test summing over a filtered queryset
    assert self.Person.objects.filter(age__gte=50).sum("age") == sum(
        a for a in ages if a >= 50
    )

def test_sum_over_db_field(self):
    """Ensure that a field mapped to a db field with a different
    name can be summed over correctly.
    """

    class UserVisit(Document):
        num_visits = IntField(db_field="visits")

    UserVisit.drop_collection()
    UserVisit.objects.create(num_visits=10)
    UserVisit.objects.create(num_visits=5)

    assert UserVisit.objects.sum("num_visits") == 15

def test_average_over_db_field(self):
    """Ensure that a field mapped to a db field with a different name
    can have its average computed correctly.
    """

    class UserVisit(Document):
        num_visits = IntField(db_field="visits")

    UserVisit.drop_collection()
    UserVisit.objects.create(num_visits=20)
    UserVisit.objects.create(num_visits=10)

    assert UserVisit.objects.average("num_visits") == 15

# average over a DecimalField inside an embedded document.
def test_embedded_average(self):
    class Pay(EmbeddedDocument):
        value = DecimalField()

    class Doc(Document):
        name = StringField()
        pay = EmbeddedDocumentField(Pay)

    Doc.drop_collection()

    Doc(name="Wilson Junior", pay=Pay(value=150)).save()
    Doc(name="Isabella Luanna", pay=Pay(value=530)).save()
    Doc(name="Tayza mariana", pay=Pay(value=165)).save()
    Doc(name="Eliana Costa", pay=Pay(value=115)).save()

    assert Doc.objects.average("pay.value") == 240

# average over a list field averages the individual list elements.
def test_embedded_array_average(self):
    class Pay(EmbeddedDocument):
        values = ListField(DecimalField())

    class Doc(Document):
        name = StringField()
        pay = EmbeddedDocumentField(Pay)

    Doc.drop_collection()

    Doc(name="Wilson Junior", pay=Pay(values=[150, 100])).save()
    Doc(name="Isabella Luanna", pay=Pay(values=[530, 100])).save()
    Doc(name="Tayza mariana", pay=Pay(values=[165, 100])).save()
    Doc(name="Eliana Costa", pay=Pay(values=[115, 100])).save()

    assert Doc.objects.average("pay.values") == 170

def test_array_average(self):
    class Doc(Document):
        values = ListField(DecimalField())

    Doc.drop_collection()

    Doc(values=[150, 100]).save()
    Doc(values=[530, 100]).save()
    Doc(values=[165, 100]).save()
    Doc(values=[115, 100]).save()

    assert Doc.objects.average("values") == 170

def test_embedded_sum(self):
    class Pay(EmbeddedDocument):
        value = DecimalField()

    class Doc(Document):
        name = StringField()
        pay = EmbeddedDocumentField(Pay)

    Doc.drop_collection()

    Doc(name="Wilson Junior", pay=Pay(value=150)).save()
    Doc(name="Isabella Luanna", pay=Pay(value=530)).save()
    Doc(name="Tayza mariana", pay=Pay(value=165)).save()
    Doc(name="Eliana Costa", pay=Pay(value=115)).save()

    assert Doc.objects.sum("pay.value") == 960

def test_embedded_array_sum(self):
    class Pay(EmbeddedDocument):
        values = ListField(DecimalField())

    class Doc(Document):
        name = StringField()
        pay = EmbeddedDocumentField(Pay)

    Doc.drop_collection()

    Doc(name="Wilson Junior", pay=Pay(values=[150, 100])).save()
    Doc(name="Isabella Luanna", pay=Pay(values=[530, 100])).save()
    Doc(name="Tayza mariana", pay=Pay(values=[165, 100])).save()
    Doc(name="Eliana Costa", pay=Pay(values=[115, 100])).save()

    assert Doc.objects.sum("pay.values") == 1360

def test_array_sum(self):
    class Doc(Document):
        values = ListField(DecimalField())

    Doc.drop_collection()

    Doc(values=[150, 100]).save()
    Doc(values=[530, 100]).save()
    Doc(values=[165, 100]).save()
    Doc(values=[115, 100]).save()

    assert Doc.objects.sum("values") == 1360

def test_distinct(self):
    """Ensure that the QuerySet.distinct method works."""
    self.Person(name="Mr Orange", age=20).save()
    self.Person(name="Mr White", age=20).save()
    self.Person(name="Mr Orange", age=30).save()
    self.Person(name="Mr Pink", age=30).save()
    assert set(self.Person.objects.distinct("name")) == {
        "Mr Orange",
        "Mr White",
        "Mr Pink",
    }
    assert set(self.Person.objects.distinct("age")) == {20, 30}
    assert set(self.Person.objects(age=30).distinct("name")) == {
        "Mr Orange",
        "Mr Pink",
    }

# distinct on a ReferenceField dereferences by default; no_dereference()
# returns raw primary keys instead.
def test_distinct_handles_references(self):
    class Foo(Document):
        bar = ReferenceField("Bar")

    class Bar(Document):
        text = StringField()

    Bar.drop_collection()
    Foo.drop_collection()

    bar = Bar(text="hi")
    bar.save()

    foo = Foo(bar=bar)
    foo.save()

    assert Foo.objects.distinct("bar") == [bar]
    assert Foo.objects.no_dereference().distinct("bar") == [bar.pk]

# Iterating the abstract BaseQuerySet directly is not supported.
def test_base_queryset_iter_raise_not_implemented(self):
    class Tmp(Document):
        pass

    qs = BaseQuerySet(document=Tmp, collection=Tmp._get_collection())
    with pytest.raises(NotImplementedError):
        _ = list(qs)

# Chaining search_text twice on one queryset is an OperationError.
def test_search_text_raise_if_called_2_times(self):
    class News(Document):
        title = StringField()
        content = StringField()
        is_active = BooleanField(default=True)

    News.drop_collection()

    with pytest.raises(OperationError):
        News.objects.search_text("t1", language="portuguese").search_text(
            "t2", language="french"
        )

def test_search_text(self):
    class News(Document):
        title = StringField()
        content = StringField()
        is_active = BooleanField(default=True)
        meta = {
            "indexes": [
                {
                    "fields": ["$title", "$content"],
                    "default_language": "portuguese",
                    "weights": {"title": 10, "content": 2},
                }
            ]
        }

    News.drop_collection()
    # The $-prefixed index fields above must produce a MongoDB text index.
    info = News.objects._collection.index_information()
    assert "title_text_content_text" in info
    assert "textIndexVersion" in info["title_text_content_text"]

    News(
        title="Neymar quebrou a vertebra",
        content="O Brasil sofre com a perda de Neymar",
    ).save()

    News(
        title="Brasil passa para as quartas de finais",
        content="Com o brasil nas quartas de finais teremos um "
        "jogo complicado com a alemanha",
    ).save()

    count = News.objects.search_text("neymar", language="portuguese").count()
    assert count == 1

    count = News.objects.search_text("brasil -neymar").count()
    assert count == 1

    News(
        title="As eleições no Brasil já estão em planejamento",
        content="A candidata dilma roussef já começa o teu planejamento",
        is_active=False,
    ).save()

    new = News.objects(is_active=False).search_text("dilma", language="pt").first()

    # search_text must compose with ordinary filters in the raw query.
    query = News.objects(is_active=False).search_text("dilma", language="pt")._query
    assert query == {
        "$text": {"$search": "dilma", "$language": "pt"},
        "is_active": False,
    }

    assert not new.is_active
    assert "dilma" in new.content
    assert "planejamento" in new.title

    query = News.objects.search_text("candidata", text_score=True)
    assert query._search_text == "candidata"
    new = query.first()

    assert isinstance(new.get_text_score(), float)

    # count
    query = News.objects.search_text("brasil", text_score=True).order_by(
        "$text_score"
    )
    assert query._search_text == "brasil"
    assert query.count() == 3
    assert query._query == {"$text": {"$search": "brasil"}}
    cursor_args = query._cursor_args
    cursor_args_fields = cursor_args["projection"]
    assert cursor_args_fields == {"_text_score": {"$meta": "textScore"}}

    text_scores = [i.get_text_score() for i in query]
    assert len(text_scores) == 3

    assert text_scores[0] > text_scores[1]
    assert text_scores[1] > text_scores[2]
    max_text_score = text_scores[0]

    # get item
    item = News.objects.search_text("brasil").order_by("$text_score").first()
    assert item.get_text_score() == max_text_score

    # Verify query reproducibility when text_score is disabled
    # Following wouldn't work for text_score=True #2759
    for i in range(10):
        qs1 = News.objects.search_text("brasil", text_score=False)
        qs2 = News.objects.search_text("brasil", text_score=False)
        assert list(qs1) == list(qs2)

# Same as test_distinct_handles_references, but on a non-default db alias.
def test_distinct_handles_references_to_alias(self):
    register_connection("testdb", "mongoenginetest2")

    class Foo(Document):
        bar = ReferenceField("Bar")
        meta = {"db_alias": "testdb"}

    class Bar(Document):
        text = StringField()
        meta = {"db_alias": "testdb"}

    Bar.drop_collection()
    Foo.drop_collection()

    bar = Bar(text="hi")
    bar.save()

    foo = Foo(bar=bar)
    foo.save()

    assert Foo.objects.distinct("bar") == [bar]

def test_distinct_handles_db_field(self):
    """Ensure that distinct resolves field name to db_field as expected."""

    class Product(Document):
        product_id = IntField(db_field="pid")

    Product.drop_collection()

    Product(product_id=1).save()
    Product(product_id=2).save()
    Product(product_id=1).save()

    # Both the Python name and the raw db_field name are accepted.
    assert set(Product.objects.distinct("product_id")) == {1, 2}
    assert set(Product.objects.distinct("pid")) == {1, 2}

    Product.drop_collection()

def test_distinct_ListField_EmbeddedDocumentField(self):
    class Author(EmbeddedDocument):
        name = StringField()

    class Book(Document):
        title = StringField()
        authors = ListField(EmbeddedDocumentField(Author))

    Book.drop_collection()

    mark_twain = Author(name="Mark Twain")
    john_tolkien = Author(name="John Ronald Reuel Tolkien")

    Book.objects.create(title="Tom Sawyer", authors=[mark_twain])
    Book.objects.create(title="The Lord of the Rings", authors=[john_tolkien])
    Book.objects.create(title="The Stories", authors=[mark_twain, john_tolkien])

    authors = Book.objects.distinct("authors")
    authors_names = {author.name for author in authors}
    assert authors_names == {mark_twain.name, john_tolkien.name}

# distinct through two levels of embedded-document nesting.
def test_distinct_ListField_EmbeddedDocumentField_EmbeddedDocumentField(self):
    class Continent(EmbeddedDocument):
        continent_name = StringField()

    class Country(EmbeddedDocument):
        country_name = StringField()
        continent = EmbeddedDocumentField(Continent)

    class Author(EmbeddedDocument):
        name = StringField()
        country = EmbeddedDocumentField(Country)

    class Book(Document):
        title = StringField()
        authors = ListField(EmbeddedDocumentField(Author))

    Book.drop_collection()

    europe = Continent(continent_name="europe")
    asia = Continent(continent_name="asia")

    scotland = Country(country_name="Scotland", continent=europe)
    tibet = Country(country_name="Tibet", continent=asia)

    mark_twain = Author(name="Mark Twain", country=scotland)
    john_tolkien = Author(name="John Ronald Reuel Tolkien", country=tibet)

    Book.objects.create(title="Tom Sawyer", authors=[mark_twain])
    Book.objects.create(title="The Lord of the Rings", authors=[john_tolkien])
    Book.objects.create(title="The Stories", authors=[mark_twain, john_tolkien])

    country_list = Book.objects.distinct("authors.country")
    assert country_list == [scotland, tibet]

    continent_list = Book.objects.distinct("authors.country.continent")
    continent_list_names = {c.continent_name for c in continent_list}
    assert continent_list_names == {europe.continent_name, asia.continent_name}

def test_distinct_ListField_ReferenceField(self):
    class Bar(Document):
        text = StringField()

    class Foo(Document):
        bar = ReferenceField("Bar")
        bar_lst = ListField(ReferenceField("Bar"))

    Bar.drop_collection()
    Foo.drop_collection()

    bar_1 = Bar(text="hi")
    bar_1.save()

    bar_2 = Bar(text="bye")
    bar_2.save()

    foo = Foo(bar=bar_1, bar_lst=[bar_1, bar_2])
    foo.save()

    assert Foo.objects.distinct("bar_lst") == [bar_1, bar_2]
    assert Foo.objects.no_dereference().distinct("bar_lst") == [bar_1.pk, bar_2.pk]

def test_custom_manager(self):
    """Ensure that custom QuerySetManager instances work as expected."""

    class BlogPost(Document):
        tags = ListField(StringField())
        deleted = BooleanField(default=False)
        date = DateTimeField(default=datetime.datetime.now)

        # Overrides the default manager: hides soft-deleted posts.
        @queryset_manager
        def objects(cls, qryset):
            opts = {"deleted": False}
            return qryset(**opts)

        # Manager taking only the queryset (no doc-class argument).
        @queryset_manager
        def objects_1_arg(qryset):
            opts = {"deleted": False}
            return qryset(**opts)

        # Manager with an extra user-supplied argument.
        @queryset_manager
        def music_posts(doc_cls, queryset, deleted=False):
            return queryset(tags="music", deleted=deleted).order_by("date")

    BlogPost.drop_collection()

    post1 = BlogPost(tags=["music", "film"]).save()
    post2 = BlogPost(tags=["music"]).save()
    post3 = BlogPost(tags=["film", "actors"]).save()
    post4 = BlogPost(tags=["film", "actors", "music"], deleted=True).save()

    assert [p.id for p in BlogPost.objects()] == [post1.id, post2.id, post3.id]
    assert [p.id for p in BlogPost.objects_1_arg()] == [
        post1.id,
        post2.id,
        post3.id,
    ]
    assert [p.id for p in BlogPost.music_posts()] == [post1.id, post2.id]

    assert [p.id for p in BlogPost.music_posts(True)] == [post4.id]

    BlogPost.drop_collection()

def test_custom_manager_overriding_objects_works(self):
    class Foo(Document):
        bar = StringField(default="bar")
        active = BooleanField(default=False)

        @queryset_manager
        def objects(doc_cls, queryset):
            return queryset(active=True)

        @queryset_manager
        def with_inactive(doc_cls, queryset):
            return queryset(active=False)

    Foo.drop_collection()

    Foo(active=True).save()
    Foo(active=False).save()

    assert 1 == Foo.objects.count()
    assert 1 == Foo.with_inactive.count()

    Foo.with_inactive.first().delete()
    assert 0 == Foo.with_inactive.count()
    assert 1 == Foo.objects.count()

# A custom objects manager is inherited by subclasses.
def test_inherit_objects(self):
    class Foo(Document):
        meta = {"allow_inheritance": True}
        active = BooleanField(default=True)

        @queryset_manager
        def objects(klass, queryset):
            return queryset(active=True)

    class Bar(Foo):
        pass

    Bar.drop_collection()
    Bar.objects.create(active=False)
    assert 0 == Bar.objects.count()

def test_inherit_objects_override(self):
    class Foo(Document):
        meta = {"allow_inheritance": True}
        active = BooleanField(default=True)
@queryset_manager def objects(klass, queryset): return queryset(active=True) class Bar(Foo): @queryset_manager def objects(klass, queryset): return queryset(active=False) Bar.drop_collection() Bar.objects.create(active=False) assert 0 == Foo.objects.count() assert 1 == Bar.objects.count() def test_query_value_conversion(self): """Ensure that query values are properly converted when necessary.""" class BlogPost(Document): author = ReferenceField(self.Person) BlogPost.drop_collection() person = self.Person(name="test", age=30) person.save() post = BlogPost(author=person) post.save() # Test that query may be performed by providing a document as a value # while using a ReferenceField's name - the document should be # converted to an DBRef, which is legal, unlike a Document object post_obj = BlogPost.objects(author=person).first() assert post.id == post_obj.id # Test that lists of values work when using the 'in', 'nin' and 'all' post_obj = BlogPost.objects(author__in=[person]).first() assert post.id == post_obj.id BlogPost.drop_collection() def test_update_value_conversion(self): """Ensure that values used in updates are converted before use.""" class Group(Document): members = ListField(ReferenceField(self.Person)) Group.drop_collection() user1 = self.Person(name="user1") user1.save() user2 = self.Person(name="user2") user2.save() group = Group() group.save() Group.objects(id=group.id).update(set__members=[user1, user2]) group.reload() assert len(group.members) == 2 assert group.members[0].name == user1.name assert group.members[1].name == user2.name Group.drop_collection() def test_bulk(self): """Ensure bulk querying by object id returns a proper dict.""" class BlogPost(Document): title = StringField() BlogPost.drop_collection() post_1 = BlogPost(title="Post #1") post_2 = BlogPost(title="Post #2") post_3 = BlogPost(title="Post #3") post_4 = BlogPost(title="Post #4") post_5 = BlogPost(title="Post #5") post_1.save() post_2.save() post_3.save() post_4.save() 
        post_5.save()

        ids = [post_1.id, post_2.id, post_5.id]
        # in_bulk maps each requested id to its document.
        objects = BlogPost.objects.in_bulk(ids)

        assert len(objects) == 3

        assert post_1.id in objects
        assert post_2.id in objects
        assert post_5.id in objects

        assert objects[post_1.id].title == post_1.title
        assert objects[post_2.id].title == post_2.title
        assert objects[post_5.id].title == post_5.title

        # as_pymongo() makes in_bulk return raw dicts instead of documents.
        objects = BlogPost.objects.as_pymongo().in_bulk(ids)
        assert len(objects) == 3
        assert isinstance(objects[post_1.id], dict)

        BlogPost.drop_collection()

    def tearDown(self):
        self.Person.drop_collection()

    def test_custom_querysets(self):
        """Ensure that custom QuerySet classes may be used."""

        class CustomQuerySet(QuerySet):
            def not_empty(self):
                return self.count() > 0

        class Post(Document):
            meta = {"queryset_class": CustomQuerySet}

        Post.drop_collection()

        assert isinstance(Post.objects, CustomQuerySet)
        assert not Post.objects.not_empty()

        Post().save()
        assert Post.objects.not_empty()

        Post.drop_collection()

    def test_custom_querysets_set_manager_directly(self):
        """Ensure that custom QuerySet classes may be used."""

        class CustomQuerySet(QuerySet):
            def not_empty(self):
                return self.count() > 0

        class CustomQuerySetManager(QuerySetManager):
            queryset_class = CustomQuerySet

        class Post(Document):
            objects = CustomQuerySetManager()

        Post.drop_collection()

        assert isinstance(Post.objects, CustomQuerySet)
        assert not Post.objects.not_empty()

        Post().save()
        assert Post.objects.not_empty()

        Post.drop_collection()

    def test_custom_querysets_set_manager_methods(self):
        """Ensure that custom QuerySet classes methods may be used."""

        class CustomQuerySet(QuerySet):
            def delete(self, *args, **kwargs):
                """Example of method when one want to change default behaviour of it"""
                return 0

        class CustomQuerySetManager(QuerySetManager):
            queryset_class = CustomQuerySet

        class Post(Document):
            objects = CustomQuerySetManager()

        Post.drop_collection()

        assert isinstance(Post.objects, CustomQuerySet)
        assert Post.objects.delete() == 0

        post = Post()
        post.save()
        assert Post.objects.count() == 1
        # The overridden queryset delete() is a no-op, so the document survives.
        post.delete()
        assert Post.objects.count() == 1

        Post.drop_collection()

    def test_custom_querysets_managers_directly(self):
        """Ensure that custom QuerySet classes may be used."""

        class CustomQuerySetManager(QuerySetManager):
            @staticmethod
            def get_queryset(doc_cls, queryset):
                return queryset(is_published=True)

        class Post(Document):
            is_published = BooleanField(default=False)
            published = CustomQuerySetManager()

        Post.drop_collection()

        Post().save()
        Post(is_published=True).save()

        assert Post.objects.count() == 2
        assert Post.published.count() == 1

        Post.drop_collection()

    def test_custom_querysets_inherited(self):
        """Ensure that custom QuerySet classes may be used."""

        class CustomQuerySet(QuerySet):
            def not_empty(self):
                return self.count() > 0

        class Base(Document):
            meta = {"abstract": True, "queryset_class": CustomQuerySet}

        class Post(Base):
            pass

        Post.drop_collection()
        assert isinstance(Post.objects, CustomQuerySet)
        assert not Post.objects.not_empty()

        Post().save()
        assert Post.objects.not_empty()

        Post.drop_collection()

    def test_custom_querysets_inherited_direct(self):
        """Ensure that custom QuerySet classes may be used."""

        class CustomQuerySet(QuerySet):
            def not_empty(self):
                return self.count() > 0

        class CustomQuerySetManager(QuerySetManager):
            queryset_class = CustomQuerySet

        class Base(Document):
            meta = {"abstract": True}
            objects = CustomQuerySetManager()

        class Post(Base):
            pass

        Post.drop_collection()
        assert isinstance(Post.objects, CustomQuerySet)
        assert not Post.objects.not_empty()

        Post().save()
        assert Post.objects.not_empty()

        Post.drop_collection()

    def test_count_limit_and_skip(self):
        # with_limit_and_skip toggles whether count() honours limit()/skip().
        class Post(Document):
            title = StringField()

        Post.drop_collection()

        for i in range(10):
            Post(title="Post %s" % i).save()

        assert 5 == Post.objects.limit(5).skip(5).count(with_limit_and_skip=True)
        assert 10 == Post.objects.limit(5).skip(5).count(with_limit_and_skip=False)

    def test_count_and_none(self):
        """Test count works with None()"""

        class MyDoc(Document):
            pass
        MyDoc.drop_collection()
        for i in range(0, 10):
            MyDoc().save()

        assert MyDoc.objects.count() == 10
        assert MyDoc.objects.none().count() == 0

    def test_count_list_embedded(self):
        # Counting with a whole-list equality filter on embedded docs.
        class B(EmbeddedDocument):
            c = StringField()

        class A(Document):
            b = ListField(EmbeddedDocumentField(B))

        assert A.objects(b=[{"c": "c"}]).count() == 0

    def test_call_after_limits_set(self):
        """Ensure that re-filtering after slicing works"""

        class Post(Document):
            title = StringField()

        Post.drop_collection()

        Post(title="Post 1").save()
        Post(title="Post 2").save()

        posts = Post.objects.all()[0:1]
        assert len(list(posts())) == 1

        Post.drop_collection()

    def test_order_then_filter(self):
        """Ensure that ordering still works after filtering."""

        class Number(Document):
            n = IntField()

        Number.drop_collection()

        n2 = Number.objects.create(n=2)
        n1 = Number.objects.create(n=1)

        assert list(Number.objects) == [n2, n1]
        assert list(Number.objects.order_by("n")) == [n1, n2]
        assert list(Number.objects.order_by("n").filter()) == [n1, n2]

        Number.drop_collection()

    def test_clone(self):
        """Ensure that cloning clones complex querysets"""

        class Number(Document):
            n = IntField()

        Number.drop_collection()

        for i in range(1, 101):
            t = Number(n=i)
            t.save()

        test = Number.objects
        test2 = test.clone()
        assert test != test2
        assert test.count() == test2.count()

        test = test.filter(n__gt=11)
        test2 = test.clone()
        assert test != test2
        assert test.count() == test2.count()

        test = test.limit(10)
        test2 = test.clone()
        assert test != test2
        assert test.count() == test2.count()

        Number.drop_collection()

    def test_clone_retains_settings(self):
        """Ensure that cloning retains the read_preference and read_concern"""

        class Number(Document):
            n = IntField()

        Number.drop_collection()

        qs = Number.objects
        qs_clone = qs.clone()
        assert qs._read_preference == qs_clone._read_preference
        assert qs._read_concern == qs_clone._read_concern

        qs = Number.objects.read_preference(ReadPreference.PRIMARY_PREFERRED)
        qs_clone = qs.clone()
        assert qs._read_preference == ReadPreference.PRIMARY_PREFERRED
        assert qs._read_preference == qs_clone._read_preference

        qs = Number.objects.read_concern({"level": "majority"})
        qs_clone = qs.clone()
        assert qs._read_concern.document == {"level": "majority"}
        assert qs._read_concern == qs_clone._read_concern

        Number.drop_collection()

    def test_using(self):
        """Ensure that switching databases for a queryset is possible"""

        class Number2(Document):
            n = IntField()

        Number2.drop_collection()
        with switch_db(Number2, "test2") as Number2:
            Number2.drop_collection()

        for i in range(1, 10):
            t = Number2(n=i)
            t.switch_db("test2")
            t.save()

        assert len(Number2.objects.using("test2")) == 9

    def test_unset_reference(self):
        # update(unset__...) should clear a reference field to None.
        class Comment(Document):
            text = StringField()

        class Post(Document):
            comment = ReferenceField(Comment)

        Comment.drop_collection()
        Post.drop_collection()

        comment = Comment.objects.create(text="test")
        post = Post.objects.create(comment=comment)

        assert post.comment == comment

        Post.objects.update(unset__comment=1)
        post.reload()
        assert post.comment is None

        Comment.drop_collection()
        Post.drop_collection()

    def test_order_works_with_custom_db_field_names(self):
        # order_by must translate the Python field name to its db_field.
        class Number(Document):
            n = IntField(db_field="number")

        Number.drop_collection()

        n2 = Number.objects.create(n=2)
        n1 = Number.objects.create(n=1)

        assert list(Number.objects) == [n2, n1]
        assert list(Number.objects.order_by("n")) == [n1, n2]

        Number.drop_collection()

    def test_order_works_with_primary(self):
        """Ensure that order_by and primary work."""

        class Number(Document):
            n = IntField(primary_key=True)

        Number.drop_collection()

        Number(n=1).save()
        Number(n=2).save()
        Number(n=3).save()

        numbers = [n.n for n in Number.objects.order_by("-n")]
        assert [3, 2, 1] == numbers

        numbers = [n.n for n in Number.objects.order_by("+n")]
        assert [1, 2, 3] == numbers

        Number.drop_collection()

    def test_create_index(self):
        """Ensure that manual creation of indexes works."""

        class Comment(Document):
            message = StringField()
            meta = {"allow_inheritance": True}
        Comment.create_index("message")

        info = Comment.objects._collection.index_information()
        info = [
            (value["key"], value.get("unique", False), value.get("sparse", False))
            for key, value in info.items()
        ]
        # Inheritable documents get a compound index prefixed with _cls.
        assert ([("_cls", 1), ("message", 1)], False, False) in info

    def test_where_query(self):
        """Ensure that where clauses work."""

        class IntPair(Document):
            fielda = IntField()
            fieldb = IntField()

        IntPair.drop_collection()

        a = IntPair(fielda=1, fieldb=1)
        b = IntPair(fielda=1, fieldb=2)
        c = IntPair(fielda=2, fieldb=1)
        a.save()
        b.save()
        c.save()

        # ~fieldname placeholders are substituted with quoted db field names.
        query = IntPair.objects.where("this[~fielda] >= this[~fieldb]")
        assert 'this["fielda"] >= this["fieldb"]' == query._where_clause
        results = list(query)
        assert 2 == len(results)
        assert a in results
        assert c in results

        query = IntPair.objects.where("this[~fielda] == this[~fieldb]")
        results = list(query)
        assert 1 == len(results)
        assert a in results

        query = IntPair.objects.where(
            "function() { return this[~fielda] >= this[~fieldb] }"
        )
        assert (
            'function() { return this["fielda"] >= this["fieldb"] }'
            == query._where_clause
        )
        results = list(query)
        assert 2 == len(results)
        assert a in results
        assert c in results

        # where() only accepts a JavaScript string, not keyword filters.
        with pytest.raises(TypeError):
            list(IntPair.objects.where(fielda__gte=3))

    def test_where_query_field_name_subs(self):
        # ~field substitution must use the db_field, not the attribute name.
        class DomainObj(Document):
            field_1 = StringField(db_field="field_2")

        DomainObj.drop_collection()
        DomainObj(field_1="test").save()

        obj = DomainObj.objects.where("this[~field_1] == 'NOTMATCHING'")
        assert not list(obj)

        obj = DomainObj.objects.where("this[~field_1] == 'test'")
        assert list(obj)

    def test_where_modify(self):
        # modify() must respect a preceding where() clause.
        class DomainObj(Document):
            field = StringField()

        DomainObj.drop_collection()
        DomainObj(field="test").save()

        obj = DomainObj.objects.where("this[~field] == 'NOTMATCHING'")
        assert not list(obj)

        obj = DomainObj.objects.where("this[~field] == 'test'")
        assert list(obj)

        qs = DomainObj.objects.where("this[~field] == 'NOTMATCHING'").modify(
            field="new"
        )
        assert not qs

        qs = DomainObj.objects.where("this[~field] == 'test'").modify(field="new")
        assert qs

    def test_where_modify_field_name_subs(self):
        # where() + modify() with a custom db_field name.
        class DomainObj(Document):
            field_1 = StringField(db_field="field_2")

        DomainObj.drop_collection()
        DomainObj(field_1="test").save()

        obj = DomainObj.objects.where("this[~field_1] == 'NOTMATCHING'").modify(
            field_1="new"
        )
        assert not obj

        obj = DomainObj.objects.where("this[~field_1] == 'test'").modify(field_1="new")
        assert obj
        assert get_as_pymongo(obj) == {"_id": obj.id, "field_2": "new"}

    def test_scalar(self):
        class Organization(Document):
            name = StringField()

        class User(Document):
            name = StringField()
            organization = ObjectIdField()

        User.drop_collection()
        Organization.drop_collection()

        whitehouse = Organization(name="White House")
        whitehouse.save()
        User(name="Bob Dole", organization=whitehouse.id).save()

        # Efficient way to get all unique organization names for a given
        # set of users (Pretend this has additional filtering.)
        user_orgs = set(User.objects.scalar("organization"))
        orgs = Organization.objects(id__in=user_orgs).scalar("name")
        assert list(orgs) == ["White House"]

        # Efficient for generating listings, too.
orgs = Organization.objects.scalar("name").in_bulk(list(user_orgs)) user_map = User.objects.scalar("name", "organization") user_listing = [(user, orgs[org]) for user, org in user_map] assert [("Bob Dole", "White House")] == user_listing def test_scalar_simple(self): class TestDoc(Document): x = IntField() y = BooleanField() TestDoc.drop_collection() TestDoc(x=10, y=True).save() TestDoc(x=20, y=False).save() TestDoc(x=30, y=True).save() plist = list(TestDoc.objects.scalar("x", "y")) assert len(plist) == 3 assert plist[0] == (10, True) assert plist[1] == (20, False) assert plist[2] == (30, True) class UserDoc(Document): name = StringField() age = IntField() UserDoc.drop_collection() UserDoc(name="Wilson Jr", age=19).save() UserDoc(name="Wilson", age=43).save() UserDoc(name="Eliana", age=37).save() UserDoc(name="Tayza", age=15).save() ulist = list(UserDoc.objects.scalar("name", "age")) assert ulist == [ ("Wilson Jr", 19), ("Wilson", 43), ("Eliana", 37), ("Tayza", 15), ] ulist = list(UserDoc.objects.scalar("name").order_by("age")) assert ulist == [("Tayza"), ("Wilson Jr"), ("Eliana"), ("Wilson")] def test_scalar_embedded(self): class Profile(EmbeddedDocument): name = StringField() age = IntField() class Locale(EmbeddedDocument): city = StringField() country = StringField() class Person(Document): profile = EmbeddedDocumentField(Profile) locale = EmbeddedDocumentField(Locale) Person.drop_collection() Person( profile=Profile(name="Wilson Jr", age=19), locale=Locale(city="Corumba-GO", country="Brazil"), ).save() Person( profile=Profile(name="Gabriel Falcao", age=23), locale=Locale(city="New York", country="USA"), ).save() Person( profile=Profile(name="Lincoln de souza", age=28), locale=Locale(city="Belo Horizonte", country="Brazil"), ).save() Person( profile=Profile(name="Walter cruz", age=30), locale=Locale(city="Brasilia", country="Brazil"), ).save() assert list( Person.objects.order_by("profile__age").scalar("profile__name") ) == ["Wilson Jr", "Gabriel Falcao", 
"Lincoln de souza", "Walter cruz"] ulist = list( Person.objects.order_by("locale.city").scalar( "profile__name", "profile__age", "locale__city" ) ) assert ulist == [ ("Lincoln de souza", 28, "Belo Horizonte"), ("Walter cruz", 30, "Brasilia"), ("Wilson Jr", 19, "Corumba-GO"), ("Gabriel Falcao", 23, "New York"), ] def test_scalar_decimal(self): from decimal import Decimal class Person(Document): name = StringField() rating = DecimalField() Person.drop_collection() Person(name="Wilson Jr", rating=Decimal("1.0")).save() ulist = list(Person.objects.scalar("name", "rating")) assert ulist == [("Wilson Jr", Decimal("1.0"))] def test_scalar_reference_field(self): class State(Document): name = StringField() class Person(Document): name = StringField() state = ReferenceField(State) State.drop_collection() Person.drop_collection() s1 = State(name="Goias") s1.save() Person(name="Wilson JR", state=s1).save() plist = list(Person.objects.scalar("name", "state")) assert plist == [("Wilson JR", s1)] def test_scalar_generic_reference_field(self): class State(Document): name = StringField() class Person(Document): name = StringField() state = GenericReferenceField() State.drop_collection() Person.drop_collection() s1 = State(name="Goias") s1.save() Person(name="Wilson JR", state=s1).save() plist = list(Person.objects.scalar("name", "state")) assert plist == [("Wilson JR", s1)] def test_generic_reference_field_with_only_and_as_pymongo(self): class TestPerson(Document): name = StringField() class TestActivity(Document): name = StringField() owner = GenericReferenceField() TestPerson.drop_collection() TestActivity.drop_collection() person = TestPerson(name="owner") person.save() a1 = TestActivity(name="a1", owner=person) a1.save() activity = ( TestActivity.objects(owner=person) .scalar("id", "owner") .no_dereference() .first() ) assert activity[0] == a1.pk assert activity[1]["_ref"] == DBRef("test_person", person.pk) activity = TestActivity.objects(owner=person).only("id", "owner")[0] 
assert activity.pk == a1.pk assert activity.owner == person activity = ( TestActivity.objects(owner=person).only("id", "owner").as_pymongo().first() ) assert activity["_id"] == a1.pk assert activity["owner"]["_ref"], DBRef("test_person", person.pk) def test_scalar_db_field(self): class TestDoc(Document): x = IntField() y = BooleanField() TestDoc.drop_collection() TestDoc(x=10, y=True).save() TestDoc(x=20, y=False).save() TestDoc(x=30, y=True).save() plist = list(TestDoc.objects.scalar("x", "y")) assert len(plist) == 3 assert plist[0] == (10, True) assert plist[1] == (20, False) assert plist[2] == (30, True) def test_scalar_primary_key(self): class SettingValue(Document): key = StringField(primary_key=True) value = StringField() SettingValue.drop_collection() s = SettingValue(key="test", value="test value") s.save() val = SettingValue.objects.scalar("key", "value") assert list(val) == [("test", "test value")] def test_scalar_cursor_behaviour(self): """Ensure that a query returns a valid set of results.""" person1 = self.Person(name="User A", age=20) person1.save() person2 = self.Person(name="User B", age=30) person2.save() # Find all people in the collection people = self.Person.objects.scalar("name") assert people.count() == 2 results = list(people) assert results[0] == "User A" assert results[1] == "User B" # Use a query to filter the people found to just person1 people = self.Person.objects(age=20).scalar("name") assert people.count() == 1 person = next(people) assert person == "User A" # Test limit people = list(self.Person.objects.limit(1).scalar("name")) assert len(people) == 1 assert people[0] == "User A" # Test skip people = list(self.Person.objects.skip(1).scalar("name")) assert len(people) == 1 assert people[0] == "User B" person3 = self.Person(name="User C", age=40) person3.save() # Test slice limit people = list(self.Person.objects[:2].scalar("name")) assert len(people) == 2 assert people[0] == "User A" assert people[1] == "User B" # Test slice skip 
        people = list(self.Person.objects[1:].scalar("name"))
        assert len(people) == 2
        assert people[0] == "User B"
        assert people[1] == "User C"

        # Test slice limit and skip
        people = list(self.Person.objects[1:2].scalar("name"))
        assert len(people) == 1
        assert people[0] == "User B"

        # people = list(self.Person.objects[1:1].scalar("name"))
        people = self.Person.objects[1:1]
        people = people.scalar("name")
        assert len(people) == 0

        # Test slice out of range
        people = list(self.Person.objects.scalar("name")[80000:80001])
        assert len(people) == 0

        # Test larger slice __repr__
        self.Person.objects.delete()
        for i in range(55):
            self.Person(name="A%s" % i, age=i).save()

        assert self.Person.objects.scalar("name").count() == 55
        assert (
            "A0" == "%s" % self.Person.objects.order_by("name").scalar("name").first()
        )
        assert "A0" == "%s" % self.Person.objects.scalar("name").order_by("name")[0]
        assert (
            "['A1', 'A2']"
            == "%s" % self.Person.objects.order_by("age").scalar("name")[1:3]
        )
        assert (
            "['A51', 'A52']"
            == "%s" % self.Person.objects.order_by("age").scalar("name")[51:53]
        )

        # with_id and in_bulk
        person = self.Person.objects.order_by("name").first()
        assert "A0" == "%s" % self.Person.objects.scalar("name").with_id(person.id)

        pks = self.Person.objects.order_by("age").scalar("pk")[1:3]
        names = self.Person.objects.scalar("name").in_bulk(list(pks)).values()
        expected = "['A1', 'A2']"
        assert expected == "%s" % sorted(names)

    def test_fields(self):
        # fields() projections: inclusion, exclusion, $slice and $elemMatch.
        class Bar(EmbeddedDocument):
            v = StringField()
            z = StringField()

        class Foo(Document):
            x = StringField()
            y = IntField()
            items = EmbeddedDocumentListField(Bar)

        Foo.drop_collection()

        Foo(x="foo1", y=1).save()
        Foo(x="foo2", y=2, items=[]).save()
        Foo(x="foo3", y=3, items=[Bar(z="a", v="V")]).save()
        Foo(
            x="foo4",
            y=4,
            items=[
                Bar(z="a", v="V"),
                Bar(z="b", v="W"),
                Bar(z="b", v="X"),
                Bar(z="c", v="V"),
            ],
        ).save()
        Foo(
            x="foo5",
            y=5,
            items=[
                Bar(z="b", v="X"),
                Bar(z="c", v="V"),
                Bar(z="d", v="V"),
                Bar(z="e", v="V"),
            ],
        ).save()

        foos_with_x = list(Foo.objects.order_by("y").fields(x=1))
        assert all(o.x is not None for o in foos_with_x)

        foos_without_y = list(Foo.objects.order_by("y").fields(y=0))
        assert all(o.y is None for o in foos_without_y)

        # slice__items=1 keeps only the first element of each list.
        foos_with_sliced_items = list(Foo.objects.order_by("y").fields(slice__items=1))
        assert foos_with_sliced_items[0].items == []
        assert foos_with_sliced_items[1].items == []
        assert len(foos_with_sliced_items[2].items) == 1
        assert foos_with_sliced_items[2].items[0].z == "a"
        assert len(foos_with_sliced_items[3].items) == 1
        assert foos_with_sliced_items[3].items[0].z == "a"
        assert len(foos_with_sliced_items[4].items) == 1
        assert foos_with_sliced_items[4].items[0].z == "b"

        # elemMatch keeps only the first element matching the condition.
        foos_with_elem_match_items = list(
            Foo.objects.order_by("y").fields(elemMatch__items={"z": "b"})
        )
        assert foos_with_elem_match_items[0].items == []
        assert foos_with_elem_match_items[1].items == []
        assert foos_with_elem_match_items[2].items == []
        assert len(foos_with_elem_match_items[3].items) == 1
        assert foos_with_elem_match_items[3].items[0].z == "b"
        assert foos_with_elem_match_items[3].items[0].v == "W"
        assert len(foos_with_elem_match_items[4].items) == 1
        assert foos_with_elem_match_items[4].items[0].z == "b"

    def test_elem_match(self):
        # __match / __elemMatch queries against lists of embedded documents.
        class Foo(EmbeddedDocument):
            shape = StringField()
            color = StringField()
            thick = BooleanField()
            meta = {"allow_inheritance": False}

        class Bar(Document):
            foo = ListField(EmbeddedDocumentField(Foo))
            meta = {"allow_inheritance": False}

        Bar.drop_collection()

        b1 = Bar(
            foo=[
                Foo(shape="square", color="purple", thick=False),
                Foo(shape="circle", color="red", thick=True),
            ]
        )
        b1.save()

        b2 = Bar(
            foo=[
                Foo(shape="square", color="red", thick=True),
                Foo(shape="circle", color="purple", thick=False),
            ]
        )
        b2.save()

        b3 = Bar(
            foo=[
                Foo(shape="square", thick=True),
                Foo(shape="circle", color="purple", thick=False),
            ]
        )
        b3.save()

        ak = list(Bar.objects(foo__match={"shape": "square", "color": "purple"}))
        assert [b1] == ak

        ak = list(Bar.objects(foo__elemMatch={"shape": "square", "color": "purple"}))
        assert [b1] == ak

        # An embedded document instance may be used as the match value.
        ak = list(Bar.objects(foo__match=Foo(shape="square", color="purple")))
        assert [b1] == ak

        ak = list(
            Bar.objects(foo__elemMatch={"shape": "square", "color__exists": True})
        )
        assert [b1, b2] == ak

        ak = list(Bar.objects(foo__match={"shape": "square", "color__exists": True}))
        assert [b1, b2] == ak

        ak = list(
            Bar.objects(foo__elemMatch={"shape": "square", "color__exists": False})
        )
        assert [b3] == ak

        ak = list(Bar.objects(foo__match={"shape": "square", "color__exists": False}))
        assert [b3] == ak

    def test_upsert_includes_cls(self):
        """Upserts should include _cls information for inheritable classes"""

        class Test(Document):
            test = StringField()

        Test.drop_collection()
        Test.objects(test="foo").update_one(upsert=True, set__test="foo")
        assert "_cls" not in Test._collection.find_one()

        class Test(Document):
            meta = {"allow_inheritance": True}
            test = StringField()

        Test.drop_collection()
        Test.objects(test="foo").update_one(upsert=True, set__test="foo")
        assert "_cls" in Test._collection.find_one()

    def test_update_upsert_looks_like_a_digit(self):
        # Dynamic documents accept numeric-looking attribute names in updates.
        class MyDoc(DynamicDocument):
            pass

        MyDoc.drop_collection()
        assert 1 == MyDoc.objects.update_one(upsert=True, inc__47=1)
        assert MyDoc.objects.get()["47"] == 1

    def test_dictfield_key_looks_like_a_digit(self):
        """Only should work with DictField even if they have numeric keys."""

        class MyDoc(Document):
            test = DictField()

        MyDoc.drop_collection()
        doc = MyDoc(test={"47": 1})
        doc.save()
        assert MyDoc.objects.only("test__47").get().test["47"] == 1

    def test_clear_cls_query(self):
        class Parent(Document):
            name = StringField()
            meta = {"allow_inheritance": True}

        class Child(Parent):
            age = IntField()

        Parent.drop_collection()

        # Default query includes the "_cls" check.
        assert Parent.objects._query == {"_cls": {"$in": ("Parent", "Parent.Child")}}

        # Clearing the "_cls" query should work.
        assert Parent.objects.clear_cls_query()._query == {}

        # Clearing the "_cls" query should not persist across queryset instances.
        assert Parent.objects._query == {"_cls": {"$in": ("Parent", "Parent.Child")}}

        # The rest of the query should not be cleared.
        assert Parent.objects.filter(name="xyz").clear_cls_query()._query == {
            "name": "xyz"
        }

        Parent.objects.create(name="foo")
        Child.objects.create(name="bar", age=1)
        assert Parent.objects.clear_cls_query().count() == 2
        assert Parent.objects.count() == 2
        assert Child.objects().count() == 1

        # XXX This isn't really how you'd want to use `clear_cls_query()`, but
        # it's a decent test to validate its behavior nonetheless.
        assert Child.objects.clear_cls_query().count() == 2

    def test_read_preference(self):
        class Bar(Document):
            txt = StringField()

            meta = {"indexes": ["txt"]}

        Bar.drop_collection()
        bar = Bar.objects.create(txt="xyz")

        bars = list(Bar.objects.read_preference(ReadPreference.PRIMARY))
        assert bars == [bar]

        bars = Bar.objects.read_preference(ReadPreference.SECONDARY_PREFERRED)
        assert bars._read_preference == ReadPreference.SECONDARY_PREFERRED
        assert (
            bars._cursor.collection.read_preference
            == ReadPreference.SECONDARY_PREFERRED
        )

        # Make sure that `.read_preference(...)` does not accept string values.
        with pytest.raises(TypeError):
            Bar.objects.read_preference("Primary")

        def assert_read_pref(qs, expected_read_pref):
            assert qs._read_preference == expected_read_pref
            assert qs._cursor.collection.read_preference == expected_read_pref

        # Make sure read preference is respected after a `.skip(...)`.
        bars = Bar.objects.skip(1).read_preference(ReadPreference.SECONDARY_PREFERRED)
        assert_read_pref(bars, ReadPreference.SECONDARY_PREFERRED)

        # Make sure read preference is respected after a `.limit(...)`.
        bars = Bar.objects.limit(1).read_preference(ReadPreference.SECONDARY_PREFERRED)
        assert_read_pref(bars, ReadPreference.SECONDARY_PREFERRED)

        # Make sure read preference is respected after an `.order_by(...)`.
        bars = Bar.objects.order_by("txt").read_preference(
            ReadPreference.SECONDARY_PREFERRED
        )
        assert_read_pref(bars, ReadPreference.SECONDARY_PREFERRED)

        # Make sure read preference is respected after a `.hint(...)`.
        bars = Bar.objects.hint([("txt", 1)]).read_preference(
            ReadPreference.SECONDARY_PREFERRED
        )
        assert_read_pref(bars, ReadPreference.SECONDARY_PREFERRED)

    def test_read_concern(self):
        class Bar(Document):
            txt = StringField()

            meta = {"indexes": ["txt"]}

        Bar.drop_collection()
        bar = Bar.objects.create(txt="xyz")

        bars = list(Bar.objects.read_concern(None))
        assert bars == [bar]

        bars = Bar.objects.read_concern({"level": "local"})
        assert bars._read_concern.document == {"level": "local"}
        assert bars._cursor.collection.read_concern.document == {"level": "local"}

        # Make sure that `.read_concern(...)` does not accept string values.
        with pytest.raises(TypeError):
            Bar.objects.read_concern("local")

        def assert_read_concern(qs, expected_read_concern):
            assert qs._read_concern.document == expected_read_concern
            assert qs._cursor.collection.read_concern.document == expected_read_concern

        # Make sure read concern is respected after a `.skip(...)`.
        bars = Bar.objects.skip(1).read_concern({"level": "local"})
        assert_read_concern(bars, {"level": "local"})

        # Make sure read concern is respected after a `.limit(...)`.
        bars = Bar.objects.limit(1).read_concern({"level": "local"})
        assert_read_concern(bars, {"level": "local"})

        # Make sure read concern is respected after an `.order_by(...)`.
        bars = Bar.objects.order_by("txt").read_concern({"level": "local"})
        assert_read_concern(bars, {"level": "local"})

        # Make sure read concern is respected after a `.hint(...)`.
        bars = Bar.objects.hint([("txt", 1)]).read_concern({"level": "majority"})
        assert_read_concern(bars, {"level": "majority"})

    def test_json_simple(self):
        # to_json()/from_json() round-trip for simple and embedded fields.
        class Embedded(EmbeddedDocument):
            string = StringField()

        class Doc(Document):
            string = StringField()
            embedded_field = EmbeddedDocumentField(Embedded)

        Doc.drop_collection()
        Doc(string="Hi", embedded_field=Embedded(string="Hi")).save()
        Doc(string="Bye", embedded_field=Embedded(string="Bye")).save()

        Doc().save()
        json_data = Doc.objects.to_json(sort_keys=True, separators=(",", ":"))
        doc_objects = list(Doc.objects)
        assert doc_objects == Doc.objects.from_json(json_data)

    def test_json_complex(self):
        # Round-trip through JSON with one field of (almost) every type.
        class EmbeddedDoc(EmbeddedDocument):
            pass

        class Simple(Document):
            pass

        class Doc(Document):
            string_field = StringField(default="1")
            int_field = IntField(default=1)
            float_field = FloatField(default=1.1)
            boolean_field = BooleanField(default=True)
            datetime_field = DateTimeField(default=datetime.datetime.now)
            embedded_document_field = EmbeddedDocumentField(
                EmbeddedDoc, default=lambda: EmbeddedDoc()
            )
            list_field = ListField(default=lambda: [1, 2, 3])
            dict_field = DictField(default=lambda: {"hello": "world"})
            objectid_field = ObjectIdField(default=ObjectId)
            reference_field = ReferenceField(Simple, default=lambda: Simple().save())
            map_field = MapField(IntField(), default=lambda: {"simple": 1})
            decimal_field = DecimalField(default=1.0)
            complex_datetime_field = ComplexDateTimeField(default=datetime.datetime.now)
            url_field = URLField(default="http://mongoengine.org")
            dynamic_field = DynamicField(default=1)
            generic_reference_field = GenericReferenceField(
                default=lambda: Simple().save()
            )
            sorted_list_field = SortedListField(IntField(), default=lambda: [1, 2, 3])
            email_field = EmailField(default="ross@example.com")
            geo_point_field = GeoPointField(default=lambda: [1, 2])
            sequence_field = SequenceField()
            uuid_field = UUIDField(default=uuid.uuid4)
            generic_embedded_document_field = GenericEmbeddedDocumentField(
                default=lambda: EmbeddedDoc()
            )
Simple.drop_collection() Doc.drop_collection() Doc().save() json_data = Doc.objects.to_json() doc_objects = list(Doc.objects) assert doc_objects == Doc.objects.from_json(json_data) def test_as_pymongo(self): class LastLogin(EmbeddedDocument): location = StringField() ip = StringField() class User(Document): id = StringField(primary_key=True) name = StringField() age = IntField() price = DecimalField() last_login = EmbeddedDocumentField(LastLogin) User.drop_collection() User.objects.create(id="Bob", name="Bob Dole", age=89, price=Decimal("1.11")) User.objects.create( id="Barak", name="Barak Obama", age=51, price=Decimal("2.22"), last_login=LastLogin(location="White House", ip="104.107.108.116"), ) results = User.objects.as_pymongo() assert set(results[0].keys()) == {"_id", "name", "age", "price"} assert set(results[1].keys()) == {"_id", "name", "age", "price", "last_login"} results = User.objects.only("id", "name").as_pymongo() assert set(results[0].keys()) == {"_id", "name"} users = User.objects.only("name", "price").as_pymongo() results = list(users) assert isinstance(results[0], dict) assert isinstance(results[1], dict) assert results[0]["name"] == "Bob Dole" assert results[0]["price"] == 1.11 assert results[1]["name"] == "Barak Obama" assert results[1]["price"] == 2.22 users = User.objects.only("name", "last_login").as_pymongo() results = list(users) assert isinstance(results[0], dict) assert isinstance(results[1], dict) assert results[0] == {"_id": "Bob", "name": "Bob Dole"} assert results[1] == { "_id": "Barak", "name": "Barak Obama", "last_login": {"location": "White House", "ip": "104.107.108.116"}, } def test_as_pymongo_returns_cls_attribute_when_using_inheritance(self): class User(Document): name = StringField() meta = {"allow_inheritance": True} User.drop_collection() user = User(name="Bob Dole").save() result = User.objects.as_pymongo().first() assert result == {"_cls": "User", "_id": user.id, "name": "Bob Dole"} def 
test_as_pymongo_json_limit_fields(self): class User(Document): email = EmailField(unique=True, required=True) password_hash = StringField(db_field="password_hash", required=True) password_salt = StringField(db_field="password_salt", required=True) User.drop_collection() User( email="ross@example.com", password_salt="SomeSalt", password_hash="SomeHash" ).save() serialized_user = User.objects.exclude( "password_salt", "password_hash" ).as_pymongo()[0] assert {"_id", "email"} == set(serialized_user.keys()) serialized_user = User.objects.exclude( "id", "password_salt", "password_hash" ).to_json() assert '[{"email": "ross@example.com"}]' == serialized_user serialized_user = User.objects.only("email").as_pymongo()[0] assert {"_id", "email"} == set(serialized_user.keys()) serialized_user = ( User.objects.exclude("password_salt").only("email").as_pymongo()[0] ) assert {"_id", "email"} == set(serialized_user.keys()) serialized_user = ( User.objects.exclude("password_salt", "id").only("email").as_pymongo()[0] ) assert {"email"} == set(serialized_user.keys()) serialized_user = ( User.objects.exclude("password_salt", "id").only("email").to_json() ) assert '[{"email": "ross@example.com"}]' == serialized_user def test_only_after_count(self): """Test that only() works after count()""" class User(Document): name = StringField() age = IntField() address = StringField() User.drop_collection() user = User(name="User", age=50, address="Moscow, Russia").save() user_queryset = User.objects(age=50) result = user_queryset.only("name", "age").as_pymongo().first() assert result == {"_id": user.id, "name": "User", "age": 50} result = user_queryset.count() assert result == 1 result = user_queryset.only("name", "age").as_pymongo().first() assert result == {"_id": user.id, "name": "User", "age": 50} def test_no_dereference(self): class Organization(Document): name = StringField() class User(Document): name = StringField() organization = ReferenceField(Organization) User.drop_collection() 
Organization.drop_collection() whitehouse = Organization(name="White House").save() User(name="Bob Dole", organization=whitehouse).save() qs = User.objects() qs_user = qs.first() assert isinstance(qs.first().organization, Organization) user = qs.no_dereference().first() assert isinstance(user.organization, DBRef) assert isinstance(qs_user.organization, Organization) assert isinstance(qs.first().organization, Organization) def test_no_dereference_internals(self): # Test the internals on which queryset.no_dereference relies on class Organization(Document): name = StringField() class User(Document): organization = ReferenceField(Organization) User.drop_collection() Organization.drop_collection() cls_organization_field = User.organization assert cls_organization_field._auto_dereference, True # default org = Organization(name="whatever").save() User(organization=org).save() qs_no_deref = User.objects().no_dereference() user_no_deref = qs_no_deref.first() assert not qs_no_deref._auto_dereference # Make sure the instance field is different from the class field instance_org_field = user_no_deref._fields["organization"] assert instance_org_field is not cls_organization_field assert not instance_org_field._auto_dereference assert isinstance(user_no_deref.organization, DBRef) assert ( cls_organization_field._auto_dereference ), True # Make sure the class Field wasn't altered def test_no_dereference_no_side_effect_on_existing_instance(self): # Relates to issue #1677 - ensures no regression of the bug class Organization(Document): name = StringField() class User(Document): organization = ReferenceField(Organization) organization_gen = GenericReferenceField() User.drop_collection() Organization.drop_collection() org = Organization(name="whatever").save() User(organization=org, organization_gen=org).save() qs = User.objects() user = qs.first() qs_no_deref = User.objects().no_dereference() user_no_deref = qs_no_deref.first() # ReferenceField no_derf_org = 
user_no_deref.organization # was triggering the bug assert isinstance(no_derf_org, DBRef) assert isinstance(user.organization, Organization) # GenericReferenceField no_derf_org_gen = user_no_deref.organization_gen assert isinstance(no_derf_org_gen, dict) assert isinstance(user.organization_gen, Organization) def test_no_dereference_embedded_doc(self): class User(Document): name = StringField() class Member(EmbeddedDocument): name = StringField() user = ReferenceField(User) class Organization(Document): name = StringField() members = ListField(EmbeddedDocumentField(Member)) ceo = ReferenceField(User) member = EmbeddedDocumentField(Member) admins = ListField(ReferenceField(User)) Organization.drop_collection() User.drop_collection() user = User(name="Flash") user.save() member = Member(name="Flash", user=user) company = Organization( name="Mongo Inc", ceo=user, member=member, admins=[user], members=[member] ) company.save() org = Organization.objects().no_dereference().first() assert id(org._fields["admins"]) != id(Organization.admins) assert not org._fields["admins"]._auto_dereference admin = org.admins[0] assert isinstance(admin, DBRef) assert isinstance(org.member.user, DBRef) assert isinstance(org.members[0].user, DBRef) def test_cached_queryset(self): class Person(Document): name = StringField() Person.drop_collection() persons = [Person(name="No: %s" % i) for i in range(100)] Person.objects.insert(persons, load_bulk=True) with query_counter() as q: assert q == 0 people = Person.objects [x for x in people] assert 100 == len(people._result_cache) import platform if platform.python_implementation() != "PyPy": # PyPy evaluates __len__ when iterating with list comprehensions while CPython does not. # This may be a bug in PyPy (PyPy/#1802) but it does not affect # the behavior of MongoEngine. 
assert people._len is None assert q == 1 list(people) assert 100 == people._len # Caused by list calling len assert q == 1 people.count(with_limit_and_skip=True) # count is cached assert q == 1 def test_no_cached_queryset(self): class Person(Document): name = StringField() Person.drop_collection() persons = [Person(name="No: %s" % i) for i in range(100)] Person.objects.insert(persons, load_bulk=True) with query_counter() as q: assert q == 0 people = Person.objects.no_cache() [x for x in people] assert q == 1 list(people) assert q == 2 people.count() assert q == 3 def test_no_cached_queryset__repr__(self): class Person(Document): name = StringField() Person.drop_collection() qs = Person.objects.no_cache() assert repr(qs) == "[]" def test_no_cached_on_a_cached_queryset_raise_error(self): class Person(Document): name = StringField() Person.drop_collection() Person(name="a").save() qs = Person.objects() _ = list(qs) with pytest.raises(OperationError, match="QuerySet already cached"): qs.no_cache() def test_no_cached_queryset_no_cache_back_to_cache(self): class Person(Document): name = StringField() Person.drop_collection() qs = Person.objects() assert isinstance(qs, QuerySet) qs = qs.no_cache() assert isinstance(qs, QuerySetNoCache) qs = qs.cache() assert isinstance(qs, QuerySet) def test_cache_not_cloned(self): class User(Document): name = StringField() def __unicode__(self): return self.name User.drop_collection() User(name="Alice").save() User(name="Bob").save() users = User.objects.all().order_by("name") assert "%s" % users == "[<User: Alice>, <User: Bob>]" assert 2 == len(users._result_cache) users = users.filter(name="Bob") assert "%s" % users == "[<User: Bob>]" assert 1 == len(users._result_cache) def test_no_cache(self): """Ensure you can add metadata to file""" class Noddy(Document): fields = DictField() Noddy.drop_collection() noddies = [] for i in range(100): noddy = Noddy() for j in range(20): noddy.fields["key" + str(j)] = "value " + str(j) 
noddies.append(noddy) Noddy.objects.insert(noddies, load_bulk=True) docs = Noddy.objects.no_cache() counter = len([1 for i in docs]) assert counter == 100 assert len(list(docs)) == 100 # Can't directly get a length of a no-cache queryset. with pytest.raises(TypeError): len(docs) # Another iteration over the queryset should result in another db op. with query_counter() as q: list(docs) assert q == 1 # ... and another one to double-check. with query_counter() as q: list(docs) assert q == 1 def test_nested_queryset_iterator(self): # Try iterating the same queryset twice, nested. names = ["Alice", "Bob", "Chuck", "David", "Eric", "Francis", "George"] class User(Document): name = StringField() def __unicode__(self): return self.name User.drop_collection() for name in names: User(name=name).save() users = User.objects.all().order_by("name") outer_count = 0 inner_count = 0 inner_total_count = 0 with query_counter() as q: assert q == 0 assert users.count(with_limit_and_skip=True) == 7 for i, outer_user in enumerate(users): assert outer_user.name == names[i] outer_count += 1 inner_count = 0 # Calling len might disrupt the inner loop if there are bugs assert users.count(with_limit_and_skip=True) == 7 for j, inner_user in enumerate(users): assert inner_user.name == names[j] inner_count += 1 inner_total_count += 1 # inner loop should always be executed seven times assert inner_count == 7 # outer loop should be executed seven times total assert outer_count == 7 # inner loop should be executed fourtynine times total assert inner_total_count == 7 * 7 assert q == 2 def test_no_sub_classes(self): class A(Document): x = IntField() y = IntField() meta = {"allow_inheritance": True} class B(A): z = IntField() class C(B): zz = IntField() A.drop_collection() A(x=10, y=20).save() A(x=15, y=30).save() B(x=20, y=40).save() B(x=30, y=50).save() C(x=40, y=60).save() assert A.objects.no_sub_classes().count() == 2 assert A.objects.count() == 5 assert B.objects.no_sub_classes().count() == 2 
assert B.objects.count() == 3 assert C.objects.no_sub_classes().count() == 1 assert C.objects.count() == 1 for obj in A.objects.no_sub_classes(): assert obj.__class__ == A for obj in B.objects.no_sub_classes(): assert obj.__class__ == B for obj in C.objects.no_sub_classes(): assert obj.__class__ == C def test_query_generic_embedded_document(self): """Ensure that querying sub field on generic_embedded_field works""" class A(EmbeddedDocument): a_name = StringField() class B(EmbeddedDocument): b_name = StringField() class Doc(Document): document = GenericEmbeddedDocumentField(choices=(A, B)) Doc.drop_collection() Doc(document=A(a_name="A doc")).save() Doc(document=B(b_name="B doc")).save() # Using raw in filter working fine assert Doc.objects(__raw__={"document.a_name": "A doc"}).count() == 1 assert Doc.objects(__raw__={"document.b_name": "B doc"}).count() == 1 assert Doc.objects(document__a_name="A doc").count() == 1 assert Doc.objects(document__b_name="B doc").count() == 1 def test_query_reference_to_custom_pk_doc(self): class A(Document): id = StringField(primary_key=True) class B(Document): a = ReferenceField(A) A.drop_collection() B.drop_collection() a = A.objects.create(id="custom_id") B.objects.create(a=a) assert B.objects.count() == 1 assert B.objects.get(a=a).a == a assert B.objects.get(a=a.id).a == a def test_cls_query_in_subclassed_docs(self): class Animal(Document): name = StringField() meta = {"allow_inheritance": True} class Dog(Animal): pass class Cat(Animal): pass assert Animal.objects(name="Charlie")._query == { "name": "Charlie", "_cls": {"$in": ("Animal", "Animal.Dog", "Animal.Cat")}, } assert Dog.objects(name="Charlie")._query == { "name": "Charlie", "_cls": "Animal.Dog", } assert Cat.objects(name="Charlie")._query == { "name": "Charlie", "_cls": "Animal.Cat", } def test_can_have_field_same_name_as_query_operator(self): class Size(Document): name = StringField() class Example(Document): size = ReferenceField(Size) Size.drop_collection() 
Example.drop_collection() instance_size = Size(name="Large").save() Example(size=instance_size).save() assert Example.objects(size=instance_size).count() == 1 assert Example.objects(size__in=[instance_size]).count() == 1 def test_cursor_in_an_if_stmt(self): class Test(Document): test_field = StringField() Test.drop_collection() queryset = Test.objects if queryset: raise AssertionError("Empty cursor returns True") test = Test() test.test_field = "test" test.save() queryset = Test.objects if not test: raise AssertionError("Cursor has data and returned False") next(queryset) if not queryset: raise AssertionError( "Cursor has data and it must returns True, even in the last item." ) def test_bool_performance(self): class Person(Document): name = StringField() Person.drop_collection() persons = [Person(name="No: %s" % i) for i in range(100)] Person.objects.insert(persons, load_bulk=True) with query_counter() as q: if Person.objects: pass assert q == 1 op = q.db.system.profile.find( {"ns": {"$ne": "%s.system.indexes" % q.db.name}} )[0] assert op["nreturned"] == 1 def test_bool_with_ordering(self): ORDER_BY_KEY, CMD_QUERY_KEY = get_key_compat(self.mongodb_version) class Person(Document): name = StringField() Person.drop_collection() Person(name="Test").save() # Check that bool(queryset) does not uses the orderby qs = Person.objects.order_by("name") with query_counter() as q: if bool(qs): pass op = q.db.system.profile.find( {"ns": {"$ne": "%s.system.indexes" % q.db.name}} )[0] assert ORDER_BY_KEY not in op[CMD_QUERY_KEY] # Check that normal query uses orderby qs2 = Person.objects.order_by("name") with query_counter() as q: for x in qs2: pass op = q.db.system.profile.find( {"ns": {"$ne": "%s.system.indexes" % q.db.name}} )[0] assert ORDER_BY_KEY in op[CMD_QUERY_KEY] def test_bool_with_ordering_from_meta_dict(self): ORDER_BY_KEY, CMD_QUERY_KEY = get_key_compat(self.mongodb_version) class Person(Document): name = StringField() meta = {"ordering": ["name"]} 
Person.drop_collection() Person(name="B").save() Person(name="C").save() Person(name="A").save() with query_counter() as q: if Person.objects: pass op = q.db.system.profile.find( {"ns": {"$ne": "%s.system.indexes" % q.db.name}} )[0] assert ( "$orderby" not in op[CMD_QUERY_KEY] ), "BaseQuerySet must remove orderby from meta in boolen test" assert Person.objects.first().name == "A" assert Person.objects._has_data(), "Cursor has data and returned False" def test_delete_count(self): [self.Person(name=f"User {i}", age=i * 10).save() for i in range(1, 4)] assert ( self.Person.objects().delete() == 3 ) # test ordinary QuerySey delete count [self.Person(name=f"User {i}", age=i * 10).save() for i in range(1, 4)] assert ( self.Person.objects().skip(1).delete() == 2 ) # test Document delete with existing documents self.Person.objects().delete() assert ( self.Person.objects().skip(1).delete() == 0 ) # test Document delete without existing documents def test_max_time_ms(self): # 778: max_time_ms can get only int or None as input with pytest.raises(TypeError): self.Person.objects(name="name").max_time_ms("not a number") def test_subclass_field_query(self): class Animal(Document): is_mamal = BooleanField() meta = {"allow_inheritance": True} class Cat(Animal): whiskers_length = FloatField() class ScottishCat(Cat): folded_ears = BooleanField() Animal.drop_collection() Animal(is_mamal=False).save() Cat(is_mamal=True, whiskers_length=5.1).save() ScottishCat(is_mamal=True, folded_ears=True).save() assert Animal.objects(folded_ears=True).count() == 1 assert Animal.objects(whiskers_length=5.1).count() == 1 def test_loop_over_invalid_id_does_not_crash(self): class Person(Document): name = StringField() Person.drop_collection() Person._get_collection().insert_one({"name": "a", "id": ""}) for p in Person.objects(): assert p.name == "a" def test_len_during_iteration(self): """Tests that calling len on a queyset during iteration doesn't stop paging. 
""" class Data(Document): pass for i in range(300): Data().save() records = Data.objects.limit(250) # This should pull all 250 docs from mongo and populate the result # cache len(records) # Assert that iterating over documents in the qs touches every # document even if we call len(qs) midway through the iteration. for i, r in enumerate(records): if i == 58: len(records) assert i == 249 # Assert the same behavior is true even if we didn't pre-populate the # result cache. records = Data.objects.limit(250) for i, r in enumerate(records): if i == 58: len(records) assert i == 249 def test_iteration_within_iteration(self): """You should be able to reliably iterate over all the documents in a given queryset even if there are multiple iterations of it happening at the same time. """ class Data(Document): pass for i in range(300): Data().save() qs = Data.objects.limit(250) for i, doc in enumerate(qs): for j, doc2 in enumerate(qs): pass assert i == 249 assert j == 249 def test_in_operator_on_non_iterable(self): """Ensure that using the `__in` operator on a non-iterable raises an error. 
""" class User(Document): name = StringField() class BlogPost(Document): content = StringField() authors = ListField(ReferenceField(User)) User.drop_collection() BlogPost.drop_collection() author = User.objects.create(name="Test User") post = BlogPost.objects.create( content="Had a good coffee today...", authors=[author] ) # Make sure using `__in` with a list works blog_posts = BlogPost.objects(authors__in=[author]) assert list(blog_posts) == [post] # Using `__in` with a non-iterable should raise a TypeError with pytest.raises(TypeError): BlogPost.objects(authors__in=author.pk).count() # Using `__in` with a `Document` (which is seemingly iterable but not # in a way we'd expect) should raise a TypeError, too with pytest.raises(TypeError): BlogPost.objects(authors__in=author).count() def test_create_count(self): self.Person.drop_collection() self.Person.objects.create(name="Foo") self.Person.objects.create(name="Bar") self.Person.objects.create(name="Baz") assert self.Person.objects.count(with_limit_and_skip=True) == 3 self.Person.objects.create(name="Foo_1") assert self.Person.objects.count(with_limit_and_skip=True) == 4 def test_no_cursor_timeout(self): qs = self.Person.objects() assert qs._cursor_args == {} # ensure no regression of #2148 qs = self.Person.objects().timeout(True) assert qs._cursor_args == {} qs = self.Person.objects().timeout(False) assert qs._cursor_args == {"no_cursor_timeout": True} @requires_mongodb_gte_44 def test_allow_disk_use(self): qs = self.Person.objects() assert qs._cursor_args == {} qs = self.Person.objects().allow_disk_use(False) assert qs._cursor_args == {} qs = self.Person.objects().allow_disk_use(True) assert qs._cursor_args == {"allow_disk_use": True} # Test if allow_disk_use changes the results self.Person.drop_collection() self.Person.objects.create(name="Foo", age=12) self.Person.objects.create(name="Baz", age=17) self.Person.objects.create(name="Bar", age=13) qs_disk = self.Person.objects().order_by("age").allow_disk_use(True) 
qs = self.Person.objects().order_by("age") assert qs_disk.count() == qs.count() for index in range(qs_disk.count()): assert qs_disk[index] == qs[index] if __name__ == "__main__": unittest.main()
TestQueryset
python
huggingface__transformers
examples/pytorch/semantic-segmentation/run_semantic_segmentation.py
{ "start": 4821, "end": 17145 }
class ____: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( default="nvidia/mit-b0", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."}) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `hf auth login` (stored in `~/.huggingface`)." ) }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ) }, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Load dataset # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # TODO support datasets from local folders dataset = load_dataset( data_args.dataset_name, cache_dir=model_args.cache_dir, trust_remote_code=model_args.trust_remote_code ) # Rename column names to standardized names (only "image" and "label" need to be present) if "pixel_values" in dataset["train"].column_names: dataset = dataset.rename_columns({"pixel_values": "image"}) if "annotation" in dataset["train"].column_names: dataset = dataset.rename_columns({"annotation": "label"}) # If we don't have a validation split, split off a percentage of train as validation. 
data_args.train_val_split = None if "validation" in dataset else data_args.train_val_split if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0: split = dataset["train"].train_test_split(data_args.train_val_split) dataset["train"] = split["train"] dataset["validation"] = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. if data_args.dataset_name == "scene_parse_150": repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" else: repo_id = data_args.dataset_name filename = "id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"))) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: str(k) for k, v in id2label.items()} # Load the mean IoU metric from the evaluate package metric = evaluate.load("mean_iou", cache_dir=model_args.cache_dir) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. 
@torch.no_grad() def compute_metrics(eval_pred): logits, labels = eval_pred logits_tensor = torch.from_numpy(logits) # scale the logits to the size of the label logits_tensor = nn.functional.interpolate( logits_tensor, size=labels.shape[-2:], mode="bilinear", align_corners=False, ).argmax(dim=1) pred_labels = logits_tensor.detach().cpu().numpy() metrics = metric.compute( predictions=pred_labels, references=labels, num_labels=len(id2label), ignore_index=0, reduce_labels=image_processor.do_reduce_labels, ) # add per category metrics as individual key-value pairs per_category_accuracy = metrics.pop("per_category_accuracy").tolist() per_category_iou = metrics.pop("per_category_iou").tolist() metrics.update({f"accuracy_{id2label[i]}": v for i, v in enumerate(per_category_accuracy)}) metrics.update({f"iou_{id2label[i]}": v for i, v in enumerate(per_category_iou)}) return metrics config = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path, label2id=label2id, id2label=id2label, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) model = AutoModelForSemanticSegmentation.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) image_processor = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path, do_reduce_labels=data_args.do_reduce_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # Define transforms to be applied to each image and target. if "shortest_edge" in image_processor.size: # We instead set the target size as (shortest_edge, shortest_edge) to here to ensure all images are batchable. 
height, width = image_processor.size["shortest_edge"], image_processor.size["shortest_edge"] else: height, width = image_processor.size["height"], image_processor.size["width"] train_transforms = A.Compose( [ A.Lambda( name="reduce_labels", mask=reduce_labels_transform if data_args.do_reduce_labels else None, p=1.0, ), # pad image with 255, because it is ignored by loss A.PadIfNeeded(min_height=height, min_width=width, border_mode=0, value=255, p=1.0), A.RandomCrop(height=height, width=width, p=1.0), A.HorizontalFlip(p=0.5), A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std, max_pixel_value=255.0, p=1.0), ToTensorV2(), ] ) val_transforms = A.Compose( [ A.Lambda( name="reduce_labels", mask=reduce_labels_transform if data_args.do_reduce_labels else None, p=1.0, ), A.Resize(height=height, width=width, p=1.0), A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std, max_pixel_value=255.0, p=1.0), ToTensorV2(), ] ) def preprocess_batch(example_batch, transforms: A.Compose): pixel_values = [] labels = [] for image, target in zip(example_batch["image"], example_batch["label"]): transformed = transforms(image=np.array(image.convert("RGB")), mask=np.array(target)) pixel_values.append(transformed["image"]) labels.append(transformed["mask"]) encoding = {} encoding["pixel_values"] = torch.stack(pixel_values).to(torch.float) encoding["labels"] = torch.stack(labels).to(torch.long) return encoding # Preprocess function for dataset should have only one argument, # so we use partial to pass the transforms preprocess_train_batch_fn = partial(preprocess_batch, transforms=train_transforms) preprocess_val_batch_fn = partial(preprocess_batch, transforms=val_transforms) if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset") if data_args.max_train_samples is not None: dataset["train"] = ( dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples)) ) # Set the 
training transforms dataset["train"].set_transform(preprocess_train_batch_fn) if training_args.do_eval: if "validation" not in dataset: raise ValueError("--do_eval requires a validation dataset") if data_args.max_eval_samples is not None: dataset["validation"] = ( dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples)) ) # Set the validation transforms dataset["validation"].set_transform(preprocess_val_batch_fn) # Initialize our trainer trainer = Trainer( model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, processing_class=image_processor, data_collator=default_data_collator, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) trainer.save_metrics("train", train_result.metrics) trainer.save_state() # Evaluation if training_args.do_eval: metrics = trainer.evaluate() trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Write model card and (optionally) push to hub kwargs = { "finetuned_from": model_args.model_name_or_path, "dataset": data_args.dataset_name, "tags": ["image-segmentation", "vision"], } if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) if __name__ == "__main__": main()
ModelArguments
python
RaRe-Technologies__gensim
gensim/examples/dmlcz/dmlcorpus.py
{ "start": 433, "end": 2266 }
class ____: """ DmlConfig contains parameters necessary for the abstraction of a 'corpus of articles' (see the `DmlCorpus` class). Articles may come from different sources (=different locations on disk/network, different file formats etc.), so the main purpose of DmlConfig is to keep all sources in one place. Apart from glueing sources together, DmlConfig also decides where to store output files and which articles to accept for the corpus (= an additional filter over the sources). """ def __init__(self, configId, resultDir, acceptLangs=None): self.resultDir = resultDir # output files will be stored in this directory self.configId = configId self.sources = {} # all article sources; see sources.DmlSource class for an example of source if acceptLangs is None: # which languages to accept acceptLangs = {'any'} # if not specified, accept all languages (including unknown/unspecified) self.acceptLangs = set(acceptLangs) logger.info('initialized %s', self) def resultFile(self, fname): return os.path.join(self.resultDir, self.configId + '_' + fname) def acceptArticle(self, metadata): lang = metadata.get('language', 'unk') if 'any' not in self.acceptLangs and lang not in self.acceptLangs: return False return True def addSource(self, source): sourceId = str(source) assert sourceId not in self.sources, "source %s already present in the config!" % sourceId self.sources[sourceId] = source def __str__(self): return "%s<id=%s, sources=[%s], acceptLangs=[%s]>" % ( self.__class__.__name__, self.configId, ', '.join(self.sources.iterkeys()), ', '.join(self.acceptLangs) ) # endclass DmlConfig
DmlConfig
python
airbytehq__airbyte
airbyte-integrations/connectors/source-surveycto/source_surveycto/helpers.py
{ "start": 351, "end": 2329 }
class ____(object): @staticmethod def _base64_encode(string: str) -> str: return base64.b64encode(string.encode("ascii")).decode("ascii") @staticmethod def call_survey_cto(config, form_id): server_name = config["server_name"] start_date = config["start_date"] user_name_password = f"{config['username']}:{config['password']}" auth_token = Helpers._base64_encode(user_name_password) url = f"https://{server_name}.surveycto.com/" + f"api/v2/forms/data/wide/json/{form_id}?date={start_date}" retry_strategy = Retry(total=3, status_forcelist=[429, 409], method_whitelist=["HEAD", "GET", "OPTIONS"]) adapter = HTTPAdapter(max_retries=retry_strategy) http = requests.Session() http.mount("https://", adapter) http.mount("http://", adapter) # ignore-https-check response = http.get(url, headers={"Authorization": "Basic " + auth_token}) response_json = response.json() if response.status_code != 200 and response_json["error"]: message = response_json["error"]["message"] raise Exception(message) for data in response_json: try: yield data except Exception as e: raise e return data @staticmethod def get_filter_data(data): generator = SchemaGenerator(input_format="dict", infer_mode="NULLABLE", preserve_input_sort_order="true") schema_map, error_logs = generator.deduce_schema(input_data=data) schema = generator.flatten_schema(schema_map) schema_json = converter(schema) schema = schema_json["definitions"]["element"]["properties"] return schema @staticmethod def get_json_schema(schema): json_schema = { "$schema": "http://json-schema.org/draft-07/schema#", "type": "object", "properties": schema, } return json_schema
Helpers
python
sqlalchemy__sqlalchemy
test/dialect/postgresql/test_types.py
{ "start": 61628, "end": 62522 }
class ____(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = postgresql.dialect() @testing.combinations( (postgresql.TIME(), "TIME WITHOUT TIME ZONE"), (postgresql.TIME(precision=5), "TIME(5) WITHOUT TIME ZONE"), ( postgresql.TIME(timezone=True, precision=5), "TIME(5) WITH TIME ZONE", ), (postgresql.TIMESTAMP(), "TIMESTAMP WITHOUT TIME ZONE"), (postgresql.TIMESTAMP(precision=5), "TIMESTAMP(5) WITHOUT TIME ZONE"), ( postgresql.TIMESTAMP(timezone=True, precision=5), "TIMESTAMP(5) WITH TIME ZONE", ), (postgresql.TIME(precision=0), "TIME(0) WITHOUT TIME ZONE"), (postgresql.TIMESTAMP(precision=0), "TIMESTAMP(0) WITHOUT TIME ZONE"), ) def test_compile(self, type_, expected): self.assert_compile(type_, expected)
TimePrecisionCompileTest
python
networkx__networkx
networkx/classes/graph.py
{ "start": 596, "end": 1492 }
class ____: """Data Descriptor class for _adj that resets ``adj`` cached_property when needed This assumes that the ``cached_property`` ``G.adj`` should be reset whenever ``G._adj`` is set to a new value. This object sits on a class and ensures that any instance of that class clears its cached property "adj" whenever the underlying instance attribute "_adj" is set to a new object. It only affects the set process of the obj._adj attribute. All get/del operations act as they normally would. For info on Data Descriptors see: https://docs.python.org/3/howto/descriptor.html """ def __set__(self, obj, value): od = obj.__dict__ od["_adj"] = value # reset cached properties props = ["adj", "edges", "degree"] for prop in props: if prop in od: del od[prop]
_CachedPropertyResetterAdj
python
mlflow__mlflow
mlflow/deployments/plugin_manager.py
{ "start": 659, "end": 2839 }
class ____(abc.ABC): """ Abstract class defining a entrypoint based plugin registration. This class allows the registration of a function or class to provide an implementation for a given key/name. Implementations declared though the entrypoints can be automatically registered through the `register_entrypoints` method. """ def __init__(self, group_name): self._registry = {} self.group_name = group_name self._has_registered = None @abc.abstractmethod def __getitem__(self, item): # Letting the child class create this function so that the child # can raise custom exceptions if it needs to pass @property def registry(self): """ Registry stores the registered plugin as a key value pair where key is the name of the plugin and value is the plugin object """ return self._registry @property def has_registered(self): """ Returns bool representing whether the "register_entrypoints" has run or not. This doesn't return True if `register` method is called outside of `register_entrypoints` to register plugins """ return self._has_registered def register(self, target_name, plugin_module): """Register a deployment client given its target name and module Args: target_name: The name of the deployment target. This name will be used by `get_deploy_client()` to retrieve a deployment client from the plugin store. plugin_module: The module that implements the deployment plugin interface. """ self.registry[target_name] = importlib.metadata.EntryPoint( target_name, plugin_module, self.group_name ) def register_entrypoints(self): """ Runs through all the packages that has the `group_name` defined as the entrypoint and register that into the registry """ for entrypoint in get_entry_points(self.group_name): self.registry[entrypoint.name] = entrypoint self._has_registered = True @developer_stable
PluginManager
python
pytest-dev__pytest
src/_pytest/capture.py
{ "start": 17075, "end": 17718 }
class ____(FDCaptureBase[bytes]): """Capture IO to/from a given OS-level file descriptor. snap() produces `bytes`. """ EMPTY_BUFFER = b"" def snap(self) -> bytes: self._assert_state("snap", ("started", "suspended")) self.tmpfile.seek(0) res = self.tmpfile.buffer.read() self.tmpfile.seek(0) self.tmpfile.truncate() return res # type: ignore[return-value] def writeorg(self, data: bytes) -> None: """Write to original file descriptor.""" self._assert_state("writeorg", ("started", "suspended")) os.write(self.targetfd_save, data)
FDCaptureBinary
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1010845, "end": 1011443 }
class ____(sgqlc.types.Type): """Autogenerated return type of UnlockLockable""" __schema__ = github_schema __field_names__ = ("actor", "client_mutation_id", "unlocked_record") actor = sgqlc.types.Field(Actor, graphql_name="actor") """Identifies the actor who performed the event.""" client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation.""" unlocked_record = sgqlc.types.Field(Lockable, graphql_name="unlockedRecord") """The item that was unlocked."""
UnlockLockablePayload
python
kamyu104__LeetCode-Solutions
Python/even-odd-tree.py
{ "start": 221, "end": 1006 }
class ____(object): def isEvenOddTree(self, root): """ :type root: TreeNode :rtype: bool """ q = [root] is_odd = False while q: new_q = [] prev = None for node in q: if is_odd: if node.val%2 or (prev and prev.val <= node.val): return False else: if not node.val%2 or (prev and prev.val >= node.val): return False if node.left: new_q.append(node.left) if node.right: new_q.append(node.right) prev = node q = new_q is_odd = not is_odd return True
Solution
python
getsentry__sentry-python
tests/integrations/beam/test_beam.py
{ "start": 1399, "end": 3941 }
class ____(DoFn): def process(self, x, timestamp=DoFn.TimestampParam, wx=DoFn.WindowParam): if isinstance(timestamp, _DoFnParam) or isinstance(wx, _DoFnParam): raise Exception("Bad instance") if x: 1 / 0 yield True def fail(x): if x: 1 / 0 return [True] test_parent = A(foo) test_child = B() test_simple = SimpleFunc() test_place_holder = PlaceHolderFunc() test_callable = CallableWrapperDoFn(fail) # Cannot call simple functions or placeholder test. @pytest.mark.parametrize( "obj,f,args,kwargs", [ [test_parent, "fn", (), {}], [test_child, "fn", (False,), {"element": True}], [test_child, "fn", (True,), {}], [test_simple, "process", (False,), {}], [test_callable, "process", (False,), {}], ], ) def test_monkey_patch_call(obj, f, args, kwargs): func = getattr(obj, f) assert func(*args, **kwargs) assert _wrap_task_call(func)(*args, **kwargs) @pytest.mark.parametrize("f", [foo, bar, baz, test_parent.fn, test_child.fn]) def test_monkey_patch_pickle(f): f_temp = _wrap_task_call(f) assert dill.pickles(f_temp), "{} is not pickling correctly!".format(f) # Pickle everything s1 = dill.dumps(f_temp) s2 = dill.loads(s1) dill.dumps(s2) @pytest.mark.parametrize( "f,args,kwargs", [ [foo, (), {}], [bar, (1, 5), {}], [baz, (1,), {}], [test_parent.fn, (), {}], [test_child.fn, (False,), {"element": True}], [test_child.fn, (True,), {}], ], ) def test_monkey_patch_signature(f, args, kwargs): arg_types = [instance_to_type(v) for v in args] kwargs_types = {k: instance_to_type(v) for (k, v) in kwargs.items()} f_temp = _wrap_task_call(f) try: getcallargs_forhints(f, *arg_types, **kwargs_types) except Exception: print("Failed on {} with parameters {}, {}".format(f, args, kwargs)) raise try: getcallargs_forhints(f_temp, *arg_types, **kwargs_types) except Exception: print("Failed on {} with parameters {}, {}".format(f_temp, args, kwargs)) raise try: expected_signature = inspect.signature(f) test_signature = inspect.signature(f_temp) assert expected_signature == test_signature, ( "Failed on {}, 
signature {} does not match {}".format( f, expected_signature, test_signature ) ) except Exception: # expected to pass for py2.7 pass
PlaceHolderFunc
python
rushter__MLAlgorithms
mla/neuralnet/layers/convnet.py
{ "start": 145, "end": 2617 }
class ____(Layer, ParamMixin): def __init__( self, n_filters=8, filter_shape=(3, 3), padding=(0, 0), stride=(1, 1), parameters=None, ): """A 2D convolutional layer. Input shape: (n_images, n_channels, height, width) Parameters ---------- n_filters : int, default 8 The number of filters (kernels). filter_shape : tuple(int, int), default (3, 3) The shape of the filters. (height, width) parameters : Parameters instance, default None stride : tuple(int, int), default (1, 1) The step of the convolution. (height, width). padding : tuple(int, int), default (0, 0) The number of pixel to add to each side of the input. (height, weight) """ self.padding = padding self._params = parameters self.stride = stride self.filter_shape = filter_shape self.n_filters = n_filters if self._params is None: self._params = Parameters() def setup(self, X_shape): n_channels, self.height, self.width = X_shape[1:] W_shape = (self.n_filters, n_channels) + self.filter_shape b_shape = self.n_filters self._params.setup_weights(W_shape, b_shape) def forward_pass(self, X): n_images, n_channels, height, width = self.shape(X.shape) self.last_input = X self.col = image_to_column(X, self.filter_shape, self.stride, self.padding) self.col_W = self._params["W"].reshape(self.n_filters, -1).T out = np.dot(self.col, self.col_W) + self._params["b"] out = out.reshape(n_images, height, width, -1).transpose(0, 3, 1, 2) return out def backward_pass(self, delta): delta = delta.transpose(0, 2, 3, 1).reshape(-1, self.n_filters) d_W = np.dot(self.col.T, delta).transpose(1, 0).reshape(self._params["W"].shape) d_b = np.sum(delta, axis=0) self._params.update_grad("b", d_b) self._params.update_grad("W", d_W) d_c = np.dot(delta, self.col_W.T) return column_to_image( d_c, self.last_input.shape, self.filter_shape, self.stride, self.padding ) def shape(self, x_shape): height, width = convoltuion_shape( self.height, self.width, self.filter_shape, self.stride, self.padding ) return x_shape[0], self.n_filters, height, width
Convolution
python
miyuchina__mistletoe
mistletoe/html_renderer.py
{ "start": 313, "end": 9612 }
class ____(BaseRenderer): """ HTML renderer class. See mistletoe.base_renderer module for more info. """ def __init__( self, *extras, html_escape_double_quotes=False, html_escape_single_quotes=False, process_html_tokens=True, **kwargs ): """ Args: extras (list): allows subclasses to add even more custom tokens. html_escape_double_quotes (bool): whether to also escape double quotes when HTML-escaping rendered text. html_escape_single_quotes (bool): whether to also escape single quotes when HTML-escaping rendered text. process_html_tokens (bool): whether to include HTML tokens in the processing. If `False`, HTML markup will be treated as plain text: e.g. input ``<br>`` will be rendered as ``&lt;br&gt;``. **kwargs: additional parameters to be passed to the ancestor's constructor. """ self._suppress_ptag_stack = [False] final_extras = chain((HtmlBlock, HtmlSpan) if process_html_tokens else (), extras) super().__init__(*final_extras, **kwargs) self.html_escape_double_quotes = html_escape_double_quotes self.html_escape_single_quotes = html_escape_single_quotes def __exit__(self, *args): super().__exit__(*args) def render_to_plain(self, token) -> str: if token.children is not None: inner = [self.render_to_plain(child) for child in token.children] return ''.join(inner) return html.escape(token.content) def render_strong(self, token: span_token.Strong) -> str: template = '<strong>{}</strong>' return template.format(self.render_inner(token)) def render_emphasis(self, token: span_token.Emphasis) -> str: template = '<em>{}</em>' return template.format(self.render_inner(token)) def render_inline_code(self, token: span_token.InlineCode) -> str: template = '<code>{}</code>' inner = self.escape_html_text(token.children[0].content) return template.format(inner) def render_strikethrough(self, token: span_token.Strikethrough) -> str: template = '<del>{}</del>' return template.format(self.render_inner(token)) def render_image(self, token: span_token.Image) -> str: template = '<img 
src="{}" alt="{}"{} />' if token.title: title = ' title="{}"'.format(html.escape(token.title)) else: title = '' return template.format(token.src, self.render_to_plain(token), title) def render_link(self, token: span_token.Link) -> str: template = '<a href="{target}"{title}>{inner}</a>' target = self.escape_url(token.target) if token.title: title = ' title="{}"'.format(html.escape(token.title)) else: title = '' inner = self.render_inner(token) return template.format(target=target, title=title, inner=inner) def render_auto_link(self, token: span_token.AutoLink) -> str: template = '<a href="{target}">{inner}</a>' if token.mailto: target = 'mailto:{}'.format(token.target) else: target = self.escape_url(token.target) inner = self.render_inner(token) return template.format(target=target, inner=inner) def render_escape_sequence(self, token: span_token.EscapeSequence) -> str: return self.render_inner(token) def render_raw_text(self, token: span_token.RawText) -> str: return self.escape_html_text(token.content) @staticmethod def render_html_span(token: span_token.HtmlSpan) -> str: return token.content def render_heading(self, token: block_token.Heading) -> str: template = '<h{level}>{inner}</h{level}>' inner = self.render_inner(token) return template.format(level=token.level, inner=inner) def render_quote(self, token: block_token.Quote) -> str: elements = ['<blockquote>'] self._suppress_ptag_stack.append(False) elements.extend([self.render(child) for child in token.children]) self._suppress_ptag_stack.pop() elements.append('</blockquote>') return '\n'.join(elements) def render_paragraph(self, token: block_token.Paragraph) -> str: if self._suppress_ptag_stack[-1]: return '{}'.format(self.render_inner(token)) return '<p>{}</p>'.format(self.render_inner(token)) def render_block_code(self, token: block_token.BlockCode) -> str: template = '<pre><code{attr}>{inner}</code></pre>' if token.language: attr = ' class="{}"'.format('language-{}'.format(html.escape(token.language))) 
else: attr = '' inner = self.escape_html_text(token.content) return template.format(attr=attr, inner=inner) def render_list(self, token: block_token.List) -> str: template = '<{tag}{attr}>\n{inner}\n</{tag}>' if token.start is not None: tag = 'ol' attr = ' start="{}"'.format(token.start) if token.start != 1 else '' else: tag = 'ul' attr = '' self._suppress_ptag_stack.append(not token.loose) inner = '\n'.join([self.render(child) for child in token.children]) self._suppress_ptag_stack.pop() return template.format(tag=tag, attr=attr, inner=inner) def render_list_item(self, token: block_token.ListItem) -> str: if len(token.children) == 0: return '<li></li>' inner = '\n'.join([self.render(child) for child in token.children]) inner_template = '\n{}\n' if self._suppress_ptag_stack[-1]: if token.children[0].__class__.__name__ == 'Paragraph': inner_template = inner_template[1:] if token.children[-1].__class__.__name__ == 'Paragraph': inner_template = inner_template[:-1] return '<li>{}</li>'.format(inner_template.format(inner)) def render_table(self, token: block_token.Table) -> str: # This is actually gross and I wonder if there's a better way to do it. # # The primary difficulty seems to be passing down alignment options to # reach individual cells. 
template = '<table>\n{inner}</table>' if hasattr(token, 'header'): head_template = '<thead>\n{inner}</thead>\n' head_inner = self.render_table_row(token.header, is_header=True) head_rendered = head_template.format(inner=head_inner) else: head_rendered = '' body_template = '<tbody>\n{inner}</tbody>\n' body_inner = self.render_inner(token) body_rendered = body_template.format(inner=body_inner) return template.format(inner=head_rendered + body_rendered) def render_table_row(self, token: block_token.TableRow, is_header=False) -> str: template = '<tr>\n{inner}</tr>\n' inner = ''.join([self.render_table_cell(child, is_header) for child in token.children]) return template.format(inner=inner) def render_table_cell(self, token: block_token.TableCell, in_header=False) -> str: template = '<{tag}{attr}>{inner}</{tag}>\n' tag = 'th' if in_header else 'td' if token.align is None: align = 'left' elif token.align == 0: align = 'center' elif token.align == 1: align = 'right' attr = ' align="{}"'.format(align) inner = self.render_inner(token) return template.format(tag=tag, attr=attr, inner=inner) @staticmethod def render_thematic_break(token: block_token.ThematicBreak) -> str: return '<hr />' @staticmethod def render_line_break(token: span_token.LineBreak) -> str: return '\n' if token.soft else '<br />\n' @staticmethod def render_html_block(token: block_token.HtmlBlock) -> str: return token.content def render_document(self, token: block_token.Document) -> str: self.footnotes.update(token.footnotes) inner = '\n'.join([self.render(child) for child in token.children]) return '{}\n'.format(inner) if inner else '' def escape_html_text(self, s: str) -> str: """ Like `html.escape()`, but this looks into the current rendering options to decide which of the quotes (double, single, or both) to escape. Intended for escaping text content. To escape content of an attribute, simply call `html.escape()`. """ s = s.replace("&", "&amp;") # Must be done first! 
s = s.replace("<", "&lt;") s = s.replace(">", "&gt;") if self.html_escape_double_quotes: s = s.replace('"', "&quot;") if self.html_escape_single_quotes: s = s.replace('\'', "&#x27;") return s @staticmethod def escape_url(raw: str) -> str: """ Escape urls to prevent code injection craziness. (Hopefully.) """ return html.escape(quote(raw, safe='/#:()*?=%@+,&;')) HTMLRenderer = HtmlRenderer """ Deprecated name of the `HtmlRenderer` class. """
HtmlRenderer
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/enum13.py
{ "start": 340, "end": 572 }
class ____(StrEnum): MEMBER_1 = "a" MEMBER_2 = "b" s1: Literal["a"] = StrEnum1.MEMBER_1.value # This should generate an error. s2: Literal["b"] = StrEnum1.MEMBER_1.value s3: LiteralString = StrEnum1.MEMBER_1.value
StrEnum1
python
pytest-dev__pytest
src/_pytest/terminal.py
{ "start": 1853, "end": 2904 }
class ____(argparse.Action): """A modified copy of the argparse count action which counts down and updates the legacy quiet attribute at the same time. Used to unify verbosity handling. """ def __init__( self, option_strings: Sequence[str], dest: str, default: object = None, required: bool = False, help: str | None = None, ) -> None: super().__init__( option_strings=option_strings, dest=dest, nargs=0, default=default, required=required, help=help, ) def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: str | Sequence[object] | None, option_string: str | None = None, ) -> None: new_count = getattr(namespace, self.dest, 0) - 1 setattr(namespace, self.dest, new_count) # todo Deprecate config.quiet namespace.quiet = getattr(namespace, "quiet", 0) + 1
MoreQuietAction
python
huggingface__transformers
src/transformers/models/gpt2/modeling_gpt2.py
{ "start": 21669, "end": 24019 }
class ____(PreTrainedModel): config: GPT2Config base_model_prefix = "transformer" supports_gradient_checkpointing = True _no_split_modules = ["GPT2Block"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn = True _supports_sdpa = True _supports_attention_backend = True _can_compile_fullgraph = True def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) @torch.no_grad() def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear, Conv1D)): init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) if module.bias is not None: init.zeros_(module.bias) elif isinstance(module, nn.Embedding): init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) # Here we need the check explicitly, as we slice the weight in the `zeros_` call, so it looses the flag if module.padding_idx is not None and not getattr(module.weight, "_is_hf_initialized", False): init.zeros_(module.weight[module.padding_idx]) elif isinstance(module, nn.LayerNorm): init.zeros_(module.bias) init.ones_(module.weight) # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. # > -- GPT-2 :: https://openai.com/blog/better-language-models/ # # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py if isinstance(module, PreTrainedModel): for name, p in module.named_parameters(): if name == "c_proj.weight": # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block init.normal_(p, mean=0.0, std=self.config.initializer_range / math.sqrt(2 * self.config.n_layer)) @dataclass @auto_docstring( custom_intro=""" Base class for outputs of models predicting if two sentences are consecutive or not. """ )
GPT2PreTrainedModel
python
streamlit__streamlit
lib/tests/streamlit/connections/base_connection_test.py
{ "start": 1192, "end": 5079 }
class ____(unittest.TestCase): def setUp(self) -> None: # st.secrets modifies os.environ, so we save it here and # restore in tearDown. self._prev_environ = dict(os.environ) def tearDown(self) -> None: os.environ.clear() os.environ.update(self._prev_environ) st.secrets._reset() def test_instance_set_to_connect_return_value(self): assert isinstance( MockConnection("my_mock_connection")._instance, MockRawConnection ) def test_getattr_works_with_methods_on_connection(self): assert MockConnection("my_mock_connection").some_method() == "some method" def test_getattr_friendly_error_message(self): with pytest.raises(AttributeError) as e: MockConnection("my_mock_connection").some_raw_connection_method() assert str(e.value) == ( "`some_raw_connection_method` doesn't exist here, but you can call " "`._instance.some_raw_connection_method` instead" ) assert ( MockConnection("my_mock_connection")._instance.some_raw_connection_method() == "some raw connection method" ) def test_getattr_totally_nonexistent_attr(self): with pytest.raises(AttributeError) as e: MockConnection("my_mock_connection").totally_nonexistent_method() assert ( str(e.value) == "'MockConnection' object has no attribute 'totally_nonexistent_method'" ) @patch("builtins.open", new_callable=mock_open, read_data=MOCK_TOML) def test_secrets_property(self, _): conn = MockConnection("my_mock_connection") assert conn._secrets.foo == "bar" @patch("builtins.open", new_callable=mock_open, read_data=MOCK_TOML) def test_secrets_property_no_matching_section(self, _): conn = MockConnection("nonexistent") assert conn._secrets == {} def test_secrets_property_no_secrets(self): conn = MockConnection("my_mock_connection") assert conn._secrets == {} def test_instance_prop_caches_raw_instance(self): conn = MockConnection("my_mock_connection") conn._raw_instance = "some other value" assert conn._instance == "some other value" def test_instance_prop_reinitializes_if_reset(self): conn = MockConnection("my_mock_connection") 
conn._raw_instance = None assert isinstance(conn._instance, MockRawConnection) def test_on_secrets_changed_when_nothing_changed(self): conn = MockConnection("my_mock_connection") # conn.reset() shouldn't be called because secrets haven't changed since conn # was constructed. with patch( "streamlit.connections.base_connection.BaseConnection.reset" ) as patched_reset: conn._on_secrets_changed("unused_arg") patched_reset.assert_not_called() def test_on_secrets_changed(self): conn = MockConnection("my_mock_connection") with ( patch( "streamlit.connections.base_connection.BaseConnection.reset" ) as patched_reset, patch( "streamlit.connections.base_connection.BaseConnection._secrets", PropertyMock( return_value=AttrDict({"mock_connection": {"new": "secret"}}) ), ), ): conn._on_secrets_changed("unused_arg") patched_reset.assert_called_once() # Test this here rather than in write_test.py because the MockConnection object # is defined here. Seems cleaner. def test_st_write(self): conn = MockConnection("my_mock_connection") with patch("streamlit.delta_generator.DeltaGenerator.help") as p: st.write(conn) p.assert_called_once_with(conn)
BaseConnectionDefaultMethodTests
python
docker__docker-py
tests/integration/models_services_test.py
{ "start": 195, "end": 13200 }
class ____(unittest.TestCase): @classmethod def setUpClass(cls): client = docker.from_env(version=TEST_API_VERSION) helpers.force_leave_swarm(client) client.swarm.init('127.0.0.1', listen_addr=helpers.swarm_listen_addr()) @classmethod def tearDownClass(cls): helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION)) def test_create(self): client = docker.from_env(version=TEST_API_VERSION) name = helpers.random_name() service = client.services.create( # create arguments name=name, labels={'foo': 'bar'}, # ContainerSpec arguments image="alpine", command="sleep 300", container_labels={'container': 'label'}, rollback_config={'order': 'start-first'} ) assert service.name == name assert service.attrs['Spec']['Labels']['foo'] == 'bar' container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] assert "alpine" in container_spec['Image'] assert container_spec['Labels'] == {'container': 'label'} spec_rollback = service.attrs['Spec'].get('RollbackConfig', None) assert spec_rollback is not None assert ('Order' in spec_rollback and spec_rollback['Order'] == 'start-first') def test_create_with_network(self): client = docker.from_env(version=TEST_API_VERSION) name = helpers.random_name() network = client.networks.create( helpers.random_name(), driver='overlay' ) service = client.services.create( # create arguments name=name, # ContainerSpec arguments image="alpine", command="sleep 300", networks=[network.id] ) assert 'Networks' in service.attrs['Spec']['TaskTemplate'] networks = service.attrs['Spec']['TaskTemplate']['Networks'] assert len(networks) == 1 assert networks[0]['Target'] == network.id def test_get(self): client = docker.from_env(version=TEST_API_VERSION) name = helpers.random_name() service = client.services.create( name=name, image="alpine", command="sleep 300" ) service = client.services.get(service.id) assert service.name == name def test_list_remove(self): client = docker.from_env(version=TEST_API_VERSION) service = client.services.create( 
name=helpers.random_name(), image="alpine", command="sleep 300" ) assert service in client.services.list() service.remove() assert service not in client.services.list() def test_tasks(self): client = docker.from_env(version=TEST_API_VERSION) service1 = client.services.create( name=helpers.random_name(), image="alpine", command="sleep 300" ) service2 = client.services.create( name=helpers.random_name(), image="alpine", command="sleep 300" ) tasks = [] while len(tasks) == 0: tasks = service1.tasks() assert len(tasks) == 1 assert tasks[0]['ServiceID'] == service1.id tasks = [] while len(tasks) == 0: tasks = service2.tasks() assert len(tasks) == 1 assert tasks[0]['ServiceID'] == service2.id def test_update(self): client = docker.from_env(version=TEST_API_VERSION) service = client.services.create( # create arguments name=helpers.random_name(), # ContainerSpec arguments image="alpine", command="sleep 300" ) service.update( # create argument name=service.name, # ContainerSpec argument command="sleep 600" ) service.reload() container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] assert container_spec['Command'] == ["sleep", "600"] def test_update_retains_service_labels(self): client = docker.from_env(version=TEST_API_VERSION) service = client.services.create( # create arguments name=helpers.random_name(), labels={'service.label': 'SampleLabel'}, # ContainerSpec arguments image="alpine", command="sleep 300" ) service.update( # create argument name=service.name, # ContainerSpec argument command="sleep 600" ) service.reload() labels = service.attrs['Spec']['Labels'] assert labels == {'service.label': 'SampleLabel'} def test_update_retains_container_labels(self): client = docker.from_env(version=TEST_API_VERSION) service = client.services.create( # create arguments name=helpers.random_name(), # ContainerSpec arguments image="alpine", command="sleep 300", container_labels={'container.label': 'SampleLabel'} ) service.update( # create argument name=service.name, # 
ContainerSpec argument command="sleep 600" ) service.reload() container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] assert container_spec['Labels'] == {'container.label': 'SampleLabel'} def test_update_remove_service_labels(self): client = docker.from_env(version=TEST_API_VERSION) service = client.services.create( # create arguments name=helpers.random_name(), labels={'service.label': 'SampleLabel'}, # ContainerSpec arguments image="alpine", command="sleep 300" ) service.update( # create argument name=service.name, labels={}, # ContainerSpec argument command="sleep 600" ) service.reload() assert not service.attrs['Spec'].get('Labels') @pytest.mark.xfail(reason='Flaky test') def test_update_retains_networks(self): client = docker.from_env(version=TEST_API_VERSION) network_name = helpers.random_name() network = client.networks.create( network_name, driver='overlay' ) service = client.services.create( # create arguments name=helpers.random_name(), networks=[network.id], # ContainerSpec arguments image="alpine", command="sleep 300" ) service.reload() service.update( # create argument name=service.name, # ContainerSpec argument command="sleep 600" ) service.reload() networks = service.attrs['Spec']['TaskTemplate']['Networks'] assert networks == [{'Target': network.id}] def test_scale_service(self): client = docker.from_env(version=TEST_API_VERSION) service = client.services.create( # create arguments name=helpers.random_name(), # ContainerSpec arguments image="alpine", command="sleep 300" ) tasks = [] while len(tasks) == 0: tasks = service.tasks() assert len(tasks) == 1 service.update( mode=docker.types.ServiceMode('replicated', replicas=2), ) while len(tasks) == 1: tasks = service.tasks() assert len(tasks) >= 2 # check that the container spec is not overridden with None service.reload() spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] assert spec.get('Command') == ['sleep', '300'] def test_scale_method_service(self): client = 
docker.from_env(version=TEST_API_VERSION) service = client.services.create( # create arguments name=helpers.random_name(), # ContainerSpec arguments image="alpine", command="sleep 300", ) tasks = [] while len(tasks) == 0: tasks = service.tasks() assert len(tasks) == 1 service.scale(2) while len(tasks) == 1: tasks = service.tasks() assert len(tasks) >= 2 # check that the container spec is not overridden with None service.reload() spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] assert spec.get('Command') == ['sleep', '300'] def test_scale_method_global_service(self): client = docker.from_env(version=TEST_API_VERSION) mode = ServiceMode('global') service = client.services.create( name=helpers.random_name(), image="alpine", command="sleep 300", mode=mode ) tasks = [] while len(tasks) == 0: tasks = service.tasks() assert len(tasks) == 1 with pytest.raises(InvalidArgument): service.scale(2) assert len(tasks) == 1 service.reload() spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] assert spec.get('Command') == ['sleep', '300'] @helpers.requires_api_version('1.25') def test_force_update_service(self): client = docker.from_env(version=TEST_API_VERSION) service = client.services.create( # create arguments name=helpers.random_name(), # ContainerSpec arguments image="alpine", command="sleep 300" ) initial_version = service.version assert service.update( # create argument name=service.name, # task template argument force_update=10, # ContainerSpec argument command="sleep 600" ) service.reload() assert service.version > initial_version @helpers.requires_api_version('1.25') def test_force_update_service_using_bool(self): client = docker.from_env(version=TEST_API_VERSION) service = client.services.create( # create arguments name=helpers.random_name(), # ContainerSpec arguments image="alpine", command="sleep 300" ) initial_version = service.version assert service.update( # create argument name=service.name, # task template argument force_update=True, # 
ContainerSpec argument command="sleep 600" ) service.reload() assert service.version > initial_version @helpers.requires_api_version('1.25') def test_force_update_service_using_shorthand_method(self): client = docker.from_env(version=TEST_API_VERSION) service = client.services.create( # create arguments name=helpers.random_name(), # ContainerSpec arguments image="alpine", command="sleep 300" ) initial_version = service.version assert service.force_update() service.reload() assert service.version > initial_version @helpers.requires_api_version('1.41') def test_create_cap_add(self): client = docker.from_env(version=TEST_API_VERSION) name = helpers.random_name() service = client.services.create( name=name, labels={'foo': 'bar'}, image="alpine", command="sleep 300", container_labels={'container': 'label'}, cap_add=["CAP_SYSLOG"] ) assert service.name == name assert service.attrs['Spec']['Labels']['foo'] == 'bar' container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] assert "alpine" in container_spec['Image'] assert container_spec['Labels'] == {'container': 'label'} assert "CAP_SYSLOG" in container_spec["CapabilityAdd"] @helpers.requires_api_version('1.41') def test_create_cap_drop(self): client = docker.from_env(version=TEST_API_VERSION) name = helpers.random_name() service = client.services.create( name=name, labels={'foo': 'bar'}, image="alpine", command="sleep 300", container_labels={'container': 'label'}, cap_drop=["CAP_SYSLOG"] ) assert service.name == name assert service.attrs['Spec']['Labels']['foo'] == 'bar' container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] assert "alpine" in container_spec['Image'] assert container_spec['Labels'] == {'container': 'label'} assert "CAP_SYSLOG" in container_spec["CapabilityDrop"]
ServiceTest
python
geekcomputers__Python
Image-watermarker/watermark.py
{ "start": 116, "end": 1379 }
class ____: def __init__(self): pass def add_text_watermark( self, image, text, text_color, font_style, font_size, position=(0, 0) ): font = ImageFont.truetype(font_style, font_size) draw = ImageDraw.Draw(image) draw.text(position, text, fill=text_color, font=font) return image def add_logo(self, image, logo, position=(0, 0)): if logo.mode != "RGBA": logo = logo.convert("RGBA") if image.mode != "RGBA": image = image.convert("RGBA") if (position[0] + logo.width > image.width) or ( position[1] + logo.height > image.height ): CTkMessagebox(title="Logo position", message="Logo position out of bounds.") image.paste(logo, position, mask=logo) return image def save_image(self, image): save_path = filedialog.asksaveasfilename( defaultextension="*.png", title="Save as", filetypes=[ ("PNG files", "*.png"), ("All files", "*.*"), ], ) if save_path: try: image.save(save_path) except Exception: print("Failed to save image: {e}")
Watermark
python
huggingface__transformers
src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py
{ "start": 3402, "end": 4011 }
class ____(nn.Embedding): """ This module overrides nn.Embeddings' forward by multiplying with embeddings scale. """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0): super().__init__(num_embeddings, embedding_dim, padding_idx) self.embed_scale = embed_scale def forward(self, input_ids: torch.Tensor): return super().forward(input_ids) * self.embed_scale # Copied from transformers.models.big_bird.modeling_big_bird.BigBirdSelfAttention with BigBird->BigBirdPegasus
BigBirdPegasusScaledWordEmbedding
python
realpython__materials
game-of-life-python/source_code_step_4/rplife/views.py
{ "start": 73, "end": 902 }
class ____: def __init__(self, pattern, gen=10, frame_rate=7, bbox=(0, 0, 20, 20)): self.pattern = pattern self.gen = gen self.frame_rate = frame_rate self.bbox = bbox def show(self): curses.wrapper(self._draw) def _draw(self, screen): current_grid = LifeGrid(self.pattern) curses.curs_set(0) screen.clear() try: screen.addstr(0, 0, current_grid.as_string(self.bbox)) except curses.error: raise ValueError( f"Error: terminal too small for pattern '{self.pattern.name}'" ) for _ in range(self.gen): current_grid.evolve() screen.addstr(0, 0, current_grid.as_string(self.bbox)) screen.refresh() sleep(1 / self.frame_rate)
CursesView
python
huggingface__transformers
src/transformers/models/clap/modeling_clap.py
{ "start": 58186, "end": 61376 }
class ____(ClapPreTrainedModel): config: ClapAudioConfig main_input_name = "input_features" input_modalities = "audio" def __init__(self, config: ClapAudioConfig): super().__init__(config) self.audio_encoder = ClapAudioEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.audio_encoder.patch_embed.proj @auto_docstring def forward( self, input_features: Optional[torch.FloatTensor] = None, is_longer: Optional[torch.BoolTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: r""" is_longer (`torch.FloatTensor`, of shape `(batch_size, 1)`, *optional*): Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance the features. Examples: ```python >>> from datasets import load_dataset >>> from transformers import AutoProcessor, ClapAudioModel >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example") >>> audio_sample = dataset["train"]["audio"][0]["array"] >>> model = ClapAudioModel.from_pretrained("laion/clap-htsat-fused") >>> processor = AutoProcessor.from_pretrained("laion/clap-htsat-fused") >>> inputs = processor(audio=audio_sample, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return self.audio_encoder( input_features=input_features, is_longer=is_longer, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) @auto_docstring( custom_intro=""" The model can behave as an 
encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in *Attention is all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. .. _*Attention is all you need*: https://huggingface.co/papers/1706.03762 """ )
ClapAudioModel
python
spack__spack
lib/spack/spack/vendor/jinja2/bccache.py
{ "start": 1144, "end": 3172 }
class ____: """Buckets are used to store the bytecode for one template. It's created and initialized by the bytecode cache and passed to the loading functions. The buckets get an internal checksum from the cache assigned and use this to automatically reject outdated cache material. Individual bytecode cache subclasses don't have to care about cache invalidation. """ def __init__(self, environment: "Environment", key: str, checksum: str) -> None: self.environment = environment self.key = key self.checksum = checksum self.reset() def reset(self) -> None: """Resets the bucket (unloads the bytecode).""" self.code: t.Optional[CodeType] = None def load_bytecode(self, f: t.BinaryIO) -> None: """Loads bytecode from a file or file like object.""" # make sure the magic header is correct magic = f.read(len(bc_magic)) if magic != bc_magic: self.reset() return # the source code of the file changed, we need to reload checksum = pickle.load(f) if self.checksum != checksum: self.reset() return # if marshal_load fails then we need to reload try: self.code = marshal.load(f) except (EOFError, ValueError, TypeError): self.reset() return def write_bytecode(self, f: t.BinaryIO) -> None: """Dump the bytecode into the file or file like object passed.""" if self.code is None: raise TypeError("can't write empty bucket") f.write(bc_magic) pickle.dump(self.checksum, f, 2) marshal.dump(self.code, f) def bytecode_from_string(self, string: bytes) -> None: """Load bytecode from bytes.""" self.load_bytecode(BytesIO(string)) def bytecode_to_string(self) -> bytes: """Return the bytecode as bytes.""" out = BytesIO() self.write_bytecode(out) return out.getvalue()
Bucket
python
xlwings__xlwings
xlwings/constants.py
{ "start": 103453, "end": 110839 }
class ____: rgbAliceBlue = 16775408 # from enum XlRgbColor rgbAntiqueWhite = 14150650 # from enum XlRgbColor rgbAqua = 16776960 # from enum XlRgbColor rgbAquamarine = 13959039 # from enum XlRgbColor rgbAzure = 16777200 # from enum XlRgbColor rgbBeige = 14480885 # from enum XlRgbColor rgbBisque = 12903679 # from enum XlRgbColor rgbBlack = 0 # from enum XlRgbColor rgbBlanchedAlmond = 13495295 # from enum XlRgbColor rgbBlue = 16711680 # from enum XlRgbColor rgbBlueViolet = 14822282 # from enum XlRgbColor rgbBrown = 2763429 # from enum XlRgbColor rgbBurlyWood = 8894686 # from enum XlRgbColor rgbCadetBlue = 10526303 # from enum XlRgbColor rgbChartreuse = 65407 # from enum XlRgbColor rgbCoral = 5275647 # from enum XlRgbColor rgbCornflowerBlue = 15570276 # from enum XlRgbColor rgbCornsilk = 14481663 # from enum XlRgbColor rgbCrimson = 3937500 # from enum XlRgbColor rgbDarkBlue = 9109504 # from enum XlRgbColor rgbDarkCyan = 9145088 # from enum XlRgbColor rgbDarkGoldenrod = 755384 # from enum XlRgbColor rgbDarkGray = 11119017 # from enum XlRgbColor rgbDarkGreen = 25600 # from enum XlRgbColor rgbDarkGrey = 11119017 # from enum XlRgbColor rgbDarkKhaki = 7059389 # from enum XlRgbColor rgbDarkMagenta = 9109643 # from enum XlRgbColor rgbDarkOliveGreen = 3107669 # from enum XlRgbColor rgbDarkOrange = 36095 # from enum XlRgbColor rgbDarkOrchid = 13382297 # from enum XlRgbColor rgbDarkRed = 139 # from enum XlRgbColor rgbDarkSalmon = 8034025 # from enum XlRgbColor rgbDarkSeaGreen = 9419919 # from enum XlRgbColor rgbDarkSlateBlue = 9125192 # from enum XlRgbColor rgbDarkSlateGray = 5197615 # from enum XlRgbColor rgbDarkSlateGrey = 5197615 # from enum XlRgbColor rgbDarkTurquoise = 13749760 # from enum XlRgbColor rgbDarkViolet = 13828244 # from enum XlRgbColor rgbDeepPink = 9639167 # from enum XlRgbColor rgbDeepSkyBlue = 16760576 # from enum XlRgbColor rgbDimGray = 6908265 # from enum XlRgbColor rgbDimGrey = 6908265 # from enum XlRgbColor rgbDodgerBlue = 16748574 # from enum XlRgbColor 
rgbFireBrick = 2237106 # from enum XlRgbColor rgbFloralWhite = 15792895 # from enum XlRgbColor rgbForestGreen = 2263842 # from enum XlRgbColor rgbFuchsia = 16711935 # from enum XlRgbColor rgbGainsboro = 14474460 # from enum XlRgbColor rgbGhostWhite = 16775416 # from enum XlRgbColor rgbGold = 55295 # from enum XlRgbColor rgbGoldenrod = 2139610 # from enum XlRgbColor rgbGray = 8421504 # from enum XlRgbColor rgbGreen = 32768 # from enum XlRgbColor rgbGreenYellow = 3145645 # from enum XlRgbColor rgbGrey = 8421504 # from enum XlRgbColor rgbHoneydew = 15794160 # from enum XlRgbColor rgbHotPink = 11823615 # from enum XlRgbColor rgbIndianRed = 6053069 # from enum XlRgbColor rgbIndigo = 8519755 # from enum XlRgbColor rgbIvory = 15794175 # from enum XlRgbColor rgbKhaki = 9234160 # from enum XlRgbColor rgbLavender = 16443110 # from enum XlRgbColor rgbLavenderBlush = 16118015 # from enum XlRgbColor rgbLawnGreen = 64636 # from enum XlRgbColor rgbLemonChiffon = 13499135 # from enum XlRgbColor rgbLightBlue = 15128749 # from enum XlRgbColor rgbLightCoral = 8421616 # from enum XlRgbColor rgbLightCyan = 9145088 # from enum XlRgbColor rgbLightGoldenrodYellow = 13826810 # from enum XlRgbColor rgbLightGray = 13882323 # from enum XlRgbColor rgbLightGreen = 9498256 # from enum XlRgbColor rgbLightGrey = 13882323 # from enum XlRgbColor rgbLightPink = 12695295 # from enum XlRgbColor rgbLightSalmon = 8036607 # from enum XlRgbColor rgbLightSeaGreen = 11186720 # from enum XlRgbColor rgbLightSkyBlue = 16436871 # from enum XlRgbColor rgbLightSlateGray = 10061943 # from enum XlRgbColor rgbLightSlateGrey = 10061943 # from enum XlRgbColor rgbLightSteelBlue = 14599344 # from enum XlRgbColor rgbLightYellow = 14745599 # from enum XlRgbColor rgbLime = 65280 # from enum XlRgbColor rgbLimeGreen = 3329330 # from enum XlRgbColor rgbLinen = 15134970 # from enum XlRgbColor rgbMaroon = 128 # from enum XlRgbColor rgbMediumAquamarine = 11206502 # from enum XlRgbColor rgbMediumBlue = 13434880 # from enum 
XlRgbColor rgbMediumOrchid = 13850042 # from enum XlRgbColor rgbMediumPurple = 14381203 # from enum XlRgbColor rgbMediumSeaGreen = 7451452 # from enum XlRgbColor rgbMediumSlateBlue = 15624315 # from enum XlRgbColor rgbMediumSpringGreen = 10156544 # from enum XlRgbColor rgbMediumTurquoise = 13422920 # from enum XlRgbColor rgbMediumVioletRed = 8721863 # from enum XlRgbColor rgbMidnightBlue = 7346457 # from enum XlRgbColor rgbMintCream = 16449525 # from enum XlRgbColor rgbMistyRose = 14804223 # from enum XlRgbColor rgbMoccasin = 11920639 # from enum XlRgbColor rgbNavajoWhite = 11394815 # from enum XlRgbColor rgbNavy = 8388608 # from enum XlRgbColor rgbNavyBlue = 8388608 # from enum XlRgbColor rgbOldLace = 15136253 # from enum XlRgbColor rgbOlive = 32896 # from enum XlRgbColor rgbOliveDrab = 2330219 # from enum XlRgbColor rgbOrange = 42495 # from enum XlRgbColor rgbOrangeRed = 17919 # from enum XlRgbColor rgbOrchid = 14053594 # from enum XlRgbColor rgbPaleGoldenrod = 7071982 # from enum XlRgbColor rgbPaleGreen = 10025880 # from enum XlRgbColor rgbPaleTurquoise = 15658671 # from enum XlRgbColor rgbPaleVioletRed = 9662683 # from enum XlRgbColor rgbPapayaWhip = 14020607 # from enum XlRgbColor rgbPeachPuff = 12180223 # from enum XlRgbColor rgbPeru = 4163021 # from enum XlRgbColor rgbPink = 13353215 # from enum XlRgbColor rgbPlum = 14524637 # from enum XlRgbColor rgbPowderBlue = 15130800 # from enum XlRgbColor rgbPurple = 8388736 # from enum XlRgbColor rgbRed = 255 # from enum XlRgbColor rgbRosyBrown = 9408444 # from enum XlRgbColor rgbRoyalBlue = 14772545 # from enum XlRgbColor rgbSalmon = 7504122 # from enum XlRgbColor rgbSandyBrown = 6333684 # from enum XlRgbColor rgbSeaGreen = 5737262 # from enum XlRgbColor rgbSeashell = 15660543 # from enum XlRgbColor rgbSienna = 2970272 # from enum XlRgbColor rgbSilver = 12632256 # from enum XlRgbColor rgbSkyBlue = 15453831 # from enum XlRgbColor rgbSlateBlue = 13458026 # from enum XlRgbColor rgbSlateGray = 9470064 # from enum 
XlRgbColor rgbSlateGrey = 9470064 # from enum XlRgbColor rgbSnow = 16448255 # from enum XlRgbColor rgbSpringGreen = 8388352 # from enum XlRgbColor rgbSteelBlue = 11829830 # from enum XlRgbColor rgbTan = 9221330 # from enum XlRgbColor rgbTeal = 8421376 # from enum XlRgbColor rgbThistle = 14204888 # from enum XlRgbColor rgbTomato = 4678655 # from enum XlRgbColor rgbTurquoise = 13688896 # from enum XlRgbColor rgbViolet = 15631086 # from enum XlRgbColor rgbWheat = 11788021 # from enum XlRgbColor rgbWhite = 16777215 # from enum XlRgbColor rgbWhiteSmoke = 16119285 # from enum XlRgbColor rgbYellow = 65535 # from enum XlRgbColor rgbYellowGreen = 3329434 # from enum XlRgbColor
RgbColor