language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
hyperopt__hyperopt
hyperopt/spark.py
{ "start": 841, "end": 12302 }
class ____(Trials): """ Implementation of hyperopt.Trials supporting distributed execution using Apache Spark clusters. This requires fmin to be run on a Spark cluster. Plugging SparkTrials into hyperopt.fmin() allows hyperopt to send model training and evaluation tasks to Spark workers, parallelizing hyperparameter search. Each trial (set of hyperparameter values) is handled within a single Spark task; i.e., each model will be fit and evaluated on a single worker machine. Trials are run asynchronously. See hyperopt.Trials docs for general information about Trials. The fields we store in our trial docs match the base Trials class. The fields include: - 'tid': trial ID - 'state': JOB_STATE_DONE, JOB_STATE_ERROR, etc. - 'result': evaluation result for completed trial run - 'refresh_time': timestamp for last status update - 'misc': includes: - 'error': (error type, error message) - 'book_time': timestamp for trial run start """ asynchronous = True # Hard cap on the number of concurrent hyperopt tasks (Spark jobs) to run. Set at 128. MAX_CONCURRENT_JOBS_ALLOWED = 128 def __str__(self): return f"SparkTrials(trials={self.trials})" def __init__( self, parallelism=None, timeout=None, loss_threshold=None, spark_session=None, resource_profile=None, ): """ :param parallelism: Maximum number of parallel trials to run, i.e., maximum number of concurrent Spark tasks. The actual parallelism is subject to available Spark task slots at runtime. If set to None (default) or a non-positive value, this will be set to Spark's default parallelism or `1`. We cap the value at `MAX_CONCURRENT_JOBS_ALLOWED=128`. :param timeout: Maximum time (in seconds) which fmin is allowed to take. If this timeout is hit, then fmin will cancel running and proposed trials. It will retain all completed trial runs and return the best result found so far. :param spark_session: A SparkSession object. If None is passed, SparkTrials will attempt to use an existing SparkSession or create a new one. 
SparkSession is the entry point for various facilities provided by Spark. For more information, visit the documentation for PySpark. :param resource_profile: A ResourceProfile object. If not None, SparkTrials will use resources specified by the profile in Spark training tasks. """ super().__init__(exp_key=None, refresh=False) if not _have_spark: raise Exception( "SparkTrials cannot import pyspark classes. Make sure that PySpark " "is available in your environment. E.g., try running 'import pyspark'" ) validate_timeout(timeout) validate_loss_threshold(loss_threshold) self._spark = ( SparkSession.builder.getOrCreate() if spark_session is None else spark_session ) self._spark_context = self._spark.sparkContext self._spark_pinned_threads_enabled = isinstance( self._spark_context._gateway, ClientServer ) # The feature to support controlling jobGroupIds is in SPARK-22340 self._spark_supports_job_cancelling = ( self._spark_pinned_threads_enabled or hasattr(self._spark_context.parallelize([1]), "collectWithJobGroup") ) spark_default_parallelism = self._spark_context.defaultParallelism self.parallelism = self._decide_parallelism( requested_parallelism=parallelism, spark_default_parallelism=spark_default_parallelism, ) self.user_specified_parallelism = parallelism self._spark_supports_resource_profile = hasattr( self._spark_context.parallelize([1]), "withResources" ) and not self._spark.conf.get("spark.master", "").startswith("local") if self._spark_supports_resource_profile: self._resource_profile = resource_profile else: self._resource_profile = None if resource_profile is not None: logger.warning( "SparkTrials was constructed with a ResourceProfile, but this Apache " "Spark version does not support stage-level scheduling." ) if not self._spark_supports_job_cancelling and timeout is not None: logger.warning( "SparkTrials was constructed with a timeout specified, but this Apache " "Spark version does not support job group-based cancellation. 
The " "timeout will be respected when starting new Spark jobs, but " "SparkTrials will not be able to cancel running Spark jobs which exceed" " the timeout." ) self.timeout = timeout self.loss_threshold = loss_threshold self._fmin_cancelled = False self._fmin_cancelled_reason = None self.refresh() @staticmethod def _decide_parallelism(requested_parallelism, spark_default_parallelism): """ Given the requested parallelism, return the max parallelism SparkTrials will actually use. See the docstring for `parallelism` in the constructor for expected behavior. """ if requested_parallelism is None or requested_parallelism <= 0: parallelism = max(spark_default_parallelism, 1) logger.warning( "Because the requested parallelism was None or a non-positive value, " "parallelism will be set to ({d}), which is Spark's default parallelism ({s}), " "or 1, whichever is greater. " "We recommend setting parallelism explicitly to a positive value because " "the total of Spark task slots is subject to cluster sizing.".format( d=parallelism, s=spark_default_parallelism ) ) else: parallelism = requested_parallelism if parallelism > SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED: logger.warning( "Parallelism ({p}) is capped at SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED ({c}).".format( p=parallelism, c=SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED ) ) parallelism = SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED return parallelism @property def fmin_cancelled_reason(self): return self._fmin_cancelled_reason def count_successful_trials(self): """ Returns the current number of trials which ran successfully """ return self.count_by_state_unsynced(base.JOB_STATE_DONE) def count_failed_trials(self): """ Returns the current number of trial runs which failed """ return self.count_by_state_unsynced(base.JOB_STATE_ERROR) def count_cancelled_trials(self): """ Returns the current number of cancelled trial runs. This covers trials which are cancelled from exceeding the timeout. 
""" return self.count_by_state_unsynced(base.JOB_STATE_CANCEL) def count_total_trials(self): """ Returns the current number of all successful, failed, and cancelled trial runs """ total_states = [ base.JOB_STATE_DONE, base.JOB_STATE_ERROR, base.JOB_STATE_CANCEL, ] return self.count_by_state_unsynced(total_states) def delete_all(self): """ Reset the Trials to init state """ super().delete_all() self._fmin_cancelled = False self._fmin_cancelled_reason = None def trial_attachments(self, trial): raise NotImplementedError("SparkTrials does not support trial attachments.") def fmin( self, fn, space, algo, max_evals, timeout, loss_threshold, max_queue_len, rstate, verbose, pass_expr_memo_ctrl, catch_eval_exceptions, return_argmin, show_progressbar, early_stop_fn, trials_save_file="", ): """ This should not be called directly but is called via :func:`hyperopt.fmin` Refer to :func:`hyperopt.fmin` for docs on each argument """ if timeout is not None: if self.timeout is not None: logger.warning( "Timeout param was defined in Trials object, ignoring fmin definition" ) else: validate_timeout(timeout) self.timeout = timeout if loss_threshold is not None: validate_loss_threshold(loss_threshold) self.loss_threshold = loss_threshold assert ( not pass_expr_memo_ctrl ), "SparkTrials does not support `pass_expr_memo_ctrl`" assert ( not catch_eval_exceptions ), "SparkTrials does not support `catch_eval_exceptions`" state = _SparkFMinState( self._spark, self._resource_profile, fn, space, self, early_stop_fn=early_stop_fn, ) # Will launch a dispatcher thread which runs each trial task as one spark job. 
state.launch_dispatcher() try: res = fmin( fn, space, algo, max_evals, timeout=timeout, loss_threshold=loss_threshold, max_queue_len=max_queue_len, trials=self, allow_trials_fmin=False, # -- prevent recursion rstate=rstate, pass_expr_memo_ctrl=None, # not supported catch_eval_exceptions=catch_eval_exceptions, verbose=verbose, return_argmin=return_argmin, points_to_evaluate=None, # not supported show_progressbar=show_progressbar, # do not check early stopping in fmin. SparkTrials early stopping is implemented in run_dispatcher early_stop_fn=None, trials_save_file="", # not supported ) except KeyboardInterrupt as e: self._fmin_cancelled = True self._fmin_cancelled_reason = FMIN_CANCELLED_REASON_USER logger.debug("fmin thread terminated by user.") raise e except BaseException as e: logger.debug("fmin thread exits with an exception raised.") raise e else: logger.debug("fmin thread exits normally.") return res finally: state.wait_for_all_threads() logger.info( f"Total Trials: {self.count_total_trials()}: {self.count_successful_trials()} succeeded, {self.count_failed_trials()} failed, {self.count_cancelled_trials()} cancelled." )
SparkTrials
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 131173, "end": 131363 }
class ____(str, Enum): """ Storage on disk (rocksdb storage) """ def __str__(self) -> str: return str(self.value) ON_DISK = "on_disk"
SparseVectorStorageTypeOneOf
python
sympy__sympy
sympy/codegen/ast.py
{ "start": 11986, "end": 12398 }
class ____(Token): """ Represents 'continue' in C/Python ('cycle' in Fortran) Use the premade instance ``continue_`` or instantiate manually. Examples ======== >>> from sympy import ccode, fcode >>> from sympy.codegen.ast import continue_ >>> ccode(continue_) 'continue' >>> fcode(continue_, source_format='free') 'cycle' """ continue_ = ContinueToken()
ContinueToken
python
huggingface__transformers
src/transformers/models/gpt_neox/modular_gpt_neox.py
{ "start": 8577, "end": 10972 }
class ____(GradientCheckpointingLayer): def __init__(self, config, layer_idx): super().__init__() self.use_parallel_residual = config.use_parallel_residual self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_attention_dropout = nn.Dropout(config.hidden_dropout) self.post_mlp_dropout = nn.Dropout(config.hidden_dropout) self.attention = GPTNeoXAttention(config, layer_idx) self.mlp = GPTNeoXMLP(config) def forward( self, hidden_states: Optional[torch.FloatTensor], attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = False, layer_past: Optional[Cache] = None, output_attentions: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, **kwargs: Unpack[FlashAttentionKwargs], ): attn_output, attn_weights = self.attention( self.input_layernorm(hidden_states), attention_mask=attention_mask, position_ids=position_ids, layer_past=layer_past, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) attn_output = self.post_attention_dropout(attn_output) if self.use_parallel_residual: # pseudocode: # x = x + attn(ln1(x)) + mlp(ln2(x)) mlp_output = self.mlp(self.post_attention_layernorm(hidden_states)) mlp_output = self.post_mlp_dropout(mlp_output) hidden_states = mlp_output + attn_output + hidden_states else: # pseudocode: # x = x + attn(ln1(x)) # x = x + mlp(ln2(x)) attn_output = attn_output + hidden_states mlp_output = self.mlp(self.post_attention_layernorm(attn_output)) mlp_output = self.post_mlp_dropout(mlp_output) hidden_states = mlp_output + attn_output outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
GPTNeoXLayer
python
tensorflow__tensorflow
tensorflow/python/data/kernel_tests/map_test.py
{ "start": 66395, "end": 69078 }
class ____(test_base.DatasetTestBase, parameterized.TestCase): @combinations.generate( combinations.times( test_base.v2_only_combinations(), combinations.combine( dataset_range=[100], num_parallel_calls=[None, 2, dataset_ops.AUTOTUNE], deterministic=[True, False]))) def testMapV2( # V2 API preserves cardinality by default. self, dataset_range: int, num_parallel_calls: int, deterministic: bool): dataset = dataset_ops.Dataset.range(dataset_range) dataset = dataset.map( lambda x: x * 2, num_parallel_calls=num_parallel_calls, deterministic=deterministic) dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE) dataset = global_shuffle_op._global_shuffle(dataset) # Disables optimizations (e.g.: `map_parallelization`), to make sure we test # both `Map` and `ParallelMap`. # TODO(b/325112575): Support warm-start. With warm-start, prefetching uses # the unintended IteratorContext here: # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/data/prefetch_dataset_op.cc#L197-L199. 
options = options_lib.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_warm_start = False dataset = dataset.with_options(options) expected = list(range(0, dataset_range * 2, 2)) dataset_output = self.getDatasetOutput( dataset, requires_initialization=True) self.assertCountEqual(dataset_output, expected) self.assertNotEqual(dataset_output, expected) @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.combine( dataset_range=[100], num_parallel_calls=[None, 2, dataset_ops.AUTOTUNE], deterministic=[True, False]))) def testMapV1AndV2( self, dataset_range: int, num_parallel_calls: int, deterministic: bool): dataset = dataset_ops.Dataset.range(dataset_range) dataset_cardinality = dataset.cardinality() dataset = dataset.map( lambda x: x * 2, num_parallel_calls=num_parallel_calls, deterministic=deterministic) dataset = dataset.apply(cardinality.assert_cardinality(dataset_cardinality)) dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE) dataset = global_shuffle_op._global_shuffle(dataset) expected = list(range(0, dataset_range * 2, 2)) dataset_output = self.getDatasetOutput( dataset, requires_initialization=True) self.assertCountEqual(dataset_output, expected) self.assertNotEqual(dataset_output, expected)
MapGlobalShuffleTest
python
plotly__plotly.py
plotly/graph_objs/sankey/link/_line.py
{ "start": 233, "end": 4565 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "sankey.link" _path_str = "sankey.link.line" _valid_props = {"color", "colorsrc", "width", "widthsrc"} @property def color(self): """ Sets the color of the `line` around each `link`. The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def colorsrc(self): """ Sets the source reference on Chart Studio Cloud for `color`. The 'colorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["colorsrc"] @colorsrc.setter def colorsrc(self, val): self["colorsrc"] = val @property def width(self): """ Sets the width (in px) of the `line` around each `link`. The 'width' property is a number and may be specified as: - An int or float in the interval [0, inf] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|float|numpy.ndarray """ return self["width"] @width.setter def width(self, val): self["width"] = val @property def widthsrc(self): """ Sets the source reference on Chart Studio Cloud for `width`. The 'widthsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["widthsrc"] @widthsrc.setter def widthsrc(self, val): self["widthsrc"] = val @property def _prop_descriptions(self): return """\ color Sets the color of the `line` around each `link`. colorsrc Sets the source reference on Chart Studio Cloud for `color`. width Sets the width (in px) of the `line` around each `link`. widthsrc Sets the source reference on Chart Studio Cloud for `width`. 
""" def __init__( self, arg=None, color=None, colorsrc=None, width=None, widthsrc=None, **kwargs ): """ Construct a new Line object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.sankey.link.Line` color Sets the color of the `line` around each `link`. colorsrc Sets the source reference on Chart Studio Cloud for `color`. width Sets the width (in px) of the `line` around each `link`. widthsrc Sets the source reference on Chart Studio Cloud for `width`. Returns ------- Line """ super().__init__("line") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.sankey.link.Line constructor must be a dict or an instance of :class:`plotly.graph_objs.sankey.link.Line`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("color", arg, color) self._set_property("colorsrc", arg, colorsrc) self._set_property("width", arg, width) self._set_property("widthsrc", arg, widthsrc) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Line
python
getsentry__sentry
src/sentry/codecov/endpoints/test_suites/test_suites.py
{ "start": 824, "end": 2224 }
class ____(CodecovEndpoint): __test__ = False owner = ApiOwner.CODECOV publish_status = { "GET": ApiPublishStatus.PUBLIC, } @extend_schema( operation_id="Retrieve test suites belonging to a repository's test results", parameters=[ GlobalParams.ORG_ID_OR_SLUG, PreventParams.OWNER, PreventParams.REPOSITORY, PreventParams.TERM, ], request=None, responses={ 200: TestSuiteSerializer, 400: RESPONSE_BAD_REQUEST, 403: RESPONSE_FORBIDDEN, 404: RESPONSE_NOT_FOUND, }, ) def get(self, request: Request, owner: RpcIntegration, repository: str, **kwargs) -> Response: """ Retrieves test suites belonging to a repository's test results. It accepts a list of test suites as a query parameter to specify individual test suites. """ owner_slug = owner.name variables = { "owner": owner_slug, "repo": repository, "term": request.query_params.get("term", ""), } client = CodecovApiClient(git_provider_org=owner_slug) graphql_response = client.query(query=query, variables=variables) test_suites = TestSuiteSerializer().to_representation(graphql_response.json()) return Response(test_suites)
TestSuitesEndpoint
python
django__django
tests/staticfiles_tests/test_management.py
{ "start": 13002, "end": 15537 }
class ____(CollectionTestCase): overwrite_warning_msg = "This will overwrite existing files!" delete_warning_msg = "This will DELETE ALL FILES in this location!" files_copied_msg = "static files copied" @staticmethod def mock_input(stdout): def _input(msg): stdout.write(msg) return "yes" return _input def test_warning_when_clearing_staticdir(self): stdout = StringIO() self.run_collectstatic() with mock.patch("builtins.input", side_effect=self.mock_input(stdout)): call_command("collectstatic", interactive=True, clear=True, stdout=stdout) output = stdout.getvalue() self.assertNotIn(self.overwrite_warning_msg, output) self.assertIn(self.delete_warning_msg, output) def test_warning_when_overwriting_files_in_staticdir(self): stdout = StringIO() self.run_collectstatic() with mock.patch("builtins.input", side_effect=self.mock_input(stdout)): call_command("collectstatic", interactive=True, stdout=stdout) output = stdout.getvalue() self.assertIn(self.overwrite_warning_msg, output) self.assertNotIn(self.delete_warning_msg, output) def test_no_warning_when_staticdir_does_not_exist(self): stdout = StringIO() shutil.rmtree(settings.STATIC_ROOT) call_command("collectstatic", interactive=True, stdout=stdout) output = stdout.getvalue() self.assertNotIn(self.overwrite_warning_msg, output) self.assertNotIn(self.delete_warning_msg, output) self.assertIn(self.files_copied_msg, output) def test_no_warning_for_empty_staticdir(self): stdout = StringIO() with tempfile.TemporaryDirectory( prefix="collectstatic_empty_staticdir_test" ) as static_dir: with override_settings(STATIC_ROOT=static_dir): call_command("collectstatic", interactive=True, stdout=stdout) output = stdout.getvalue() self.assertNotIn(self.overwrite_warning_msg, output) self.assertNotIn(self.delete_warning_msg, output) self.assertIn(self.files_copied_msg, output) def test_cancelled(self): self.run_collectstatic() with mock.patch("builtins.input", side_effect=lambda _: "no"): with self.assertRaisesMessage( CommandError, 
"Collecting static files cancelled" ): call_command("collectstatic", interactive=True)
TestInteractiveMessages
python
django__django
tests/staticfiles_tests/test_handlers.py
{ "start": 401, "end": 1621 }
class ____(StaticFilesTestCase): async_request_factory = AsyncRequestFactory() async def test_get_async_response(self): request = self.async_request_factory.get("/static/test/file.txt") handler = ASGIStaticFilesHandler(ASGIHandler()) response = await handler.get_response_async(request) response.close() self.assertEqual(response.status_code, 200) async def test_get_async_response_not_found(self): request = self.async_request_factory.get("/static/test/not-found.txt") handler = ASGIStaticFilesHandler(ASGIHandler()) response = await handler.get_response_async(request) self.assertEqual(response.status_code, 404) async def test_non_http_requests_passed_to_the_wrapped_application(self): tests = [ "/static/path.txt", "/non-static/path.txt", ] for path in tests: with self.subTest(path=path): scope = {"type": "websocket", "path": path} handler = ASGIStaticFilesHandler(MockApplication()) response = await handler(scope, None, None) self.assertEqual(response, "Application called")
TestASGIStaticFilesHandler
python
django__django
tests/gis_tests/test_geoforms.py
{ "start": 374, "end": 8632 }
class ____(SimpleTestCase): def test_init(self): "Testing GeometryField initialization with defaults." fld = forms.GeometryField() for bad_default in ("blah", 3, "FoO", None, 0): with self.subTest(bad_default=bad_default): with self.assertRaises(ValidationError): fld.clean(bad_default) def test_srid(self): "Testing GeometryField with a SRID set." # Input that doesn't specify the SRID is assumed to be in the SRID # of the input field. fld = forms.GeometryField(srid=4326) geom = fld.clean("POINT(5 23)") self.assertEqual(4326, geom.srid) # Making the field in a different SRID from that of the geometry, and # asserting it transforms. fld = forms.GeometryField(srid=32140) # Different PROJ versions use different transformations, all are # correct as having a 1 meter accuracy. tol = 1 xform_geom = GEOSGeometry( "POINT (951640.547328465 4219369.26171664)", srid=32140 ) # The cleaned geometry is transformed to 32140 (the widget map_srid is # 3857). cleaned_geom = fld.clean( "SRID=3857;POINT (-10615777.40976205 3473169.895707852)" ) self.assertEqual(cleaned_geom.srid, 32140) self.assertTrue(xform_geom.equals_exact(cleaned_geom, tol)) def test_null(self): "Testing GeometryField's handling of null (None) geometries." # Form fields, by default, are required (`required=True`) fld = forms.GeometryField() with self.assertRaisesMessage(ValidationError, "No geometry value provided."): fld.clean(None) # This will clean None as a geometry (See #10660). fld = forms.GeometryField(required=False) self.assertIsNone(fld.clean(None)) def test_geom_type(self): "Testing GeometryField's handling of different geometry types." # By default, all geometry types are allowed. fld = forms.GeometryField() for wkt in ( "POINT(5 23)", "MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))", "LINESTRING(0 0, 1 1)", ): with self.subTest(wkt=wkt): # to_python() uses the SRID of OpenLayersWidget if the # converted value doesn't have an SRID. 
self.assertEqual( GEOSGeometry(wkt, srid=fld.widget.map_srid), fld.clean(wkt) ) pnt_fld = forms.GeometryField(geom_type="POINT") self.assertEqual( GEOSGeometry("POINT(5 23)", srid=pnt_fld.widget.map_srid), pnt_fld.clean("POINT(5 23)"), ) # a WKT for any other geom_type will be properly transformed by # `to_python` self.assertEqual( GEOSGeometry("LINESTRING(0 0, 1 1)", srid=pnt_fld.widget.map_srid), pnt_fld.to_python("LINESTRING(0 0, 1 1)"), ) # but rejected by `clean` with self.assertRaises(ValidationError): pnt_fld.clean("LINESTRING(0 0, 1 1)") def test_to_python(self): """ to_python() either returns a correct GEOSGeometry object or a ValidationError. """ good_inputs = [ "POINT(5 23)", "MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))", "LINESTRING(0 0, 1 1)", ] bad_inputs = [ "POINT(5)", "MULTI POLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))", "BLAH(0 0, 1 1)", '{"type": "FeatureCollection", "features": [' '{"geometry": {"type": "Point", "coordinates": [508375, 148905]}, ' '"type": "Feature"}]}', ] fld = forms.GeometryField() # to_python returns the same GEOSGeometry for a WKT for geo_input in good_inputs: with self.subTest(geo_input=geo_input): self.assertEqual( GEOSGeometry(geo_input, srid=fld.widget.map_srid), fld.to_python(geo_input), ) # but raises a ValidationError for any other string for geo_input in bad_inputs: with self.subTest(geo_input=geo_input): with self.assertRaises(ValidationError): fld.to_python(geo_input) def test_to_python_different_map_srid(self): f = forms.GeometryField(widget=OpenLayersWidget) json = '{ "type": "Point", "coordinates": [ 5.0, 23.0 ] }' self.assertEqual( GEOSGeometry("POINT(5 23)", srid=f.widget.map_srid), f.to_python(json) ) def test_field_with_text_widget(self): class PointForm(forms.Form): pt = forms.PointField(srid=4326, widget=forms.TextInput) form = PointForm() cleaned_pt = form.fields["pt"].clean("POINT(5 23)") self.assertEqual(cleaned_pt, GEOSGeometry("POINT(5 23)", srid=4326)) self.assertEqual(4326, cleaned_pt.srid) with 
self.assertRaisesMessage(ValidationError, "Invalid geometry value."): form.fields["pt"].clean("POINT(5)") point = GEOSGeometry("SRID=4326;POINT(5 23)") form = PointForm(data={"pt": "POINT(5 23)"}, initial={"pt": point}) self.assertFalse(form.has_changed()) def test_field_string_value(self): """ Initialization of a geometry field with a valid/empty/invalid string. Only the invalid string should trigger an error log entry. """ class PointForm(forms.Form): pt1 = forms.PointField(srid=4326) pt2 = forms.PointField(srid=4326) pt3 = forms.PointField(srid=4326) form = PointForm( { "pt1": "SRID=4326;POINT(7.3 44)", # valid "pt2": "", # empty "pt3": "PNT(0)", # invalid } ) with self.assertLogs("django.contrib.gis", "ERROR") as logger_calls: output = str(form) # The first point can't use assertInHTML() due to non-deterministic # ordering of the rendered dictionary. pt1_serialized = re.search(r"<textarea [^>]*>({[^<]+})<", output)[1] pt1_json = pt1_serialized.replace("&quot;", '"') pt1_expected = GEOSGeometry(form.data["pt1"]).transform(3857, clone=True) self.assertJSONEqual(pt1_json, pt1_expected.json) self.assertInHTML( '<textarea id="id_pt2" class="vSerializedField required" cols="150"' ' rows="10" name="pt2" hidden></textarea>', output, ) self.assertInHTML( '<textarea id="id_pt3" class="vSerializedField required" cols="150"' ' rows="10" name="pt3" hidden></textarea>', output, ) # Only the invalid PNT(0) triggers an error log entry. # Deserialization is called in form clean and in widget rendering. 
self.assertEqual(len(logger_calls.records), 2) self.assertEqual( logger_calls.records[0].getMessage(), "Error creating geometry from value 'PNT(0)' (String input " "unrecognized as WKT EWKT, and HEXEWKB.)", ) def test_override_attrs(self): self.assertIsNone(forms.BaseGeometryWidget.base_layer) self.assertEqual(forms.BaseGeometryWidget.geom_type, "GEOMETRY") self.assertEqual(forms.BaseGeometryWidget.map_srid, 4326) self.assertIs(forms.BaseGeometryWidget.display_raw, False) class PointForm(forms.Form): p = forms.PointField( widget=forms.OpenLayersWidget( attrs={ "base_layer": "some-test-file", "map_srid": 1234, } ), ) form = PointForm() rendered = form.as_p() attrs = { "base_layer": "some-test-file", "geom_type": "POINT", "map_srid": 1234, "display_raw": False, "required": True, "id": "id_p", "geom_name": "Point", } expected = json_script(attrs, "id_p_mapwidget_options") self.assertInHTML(expected, rendered)
GeometryFieldTest
python
django__django
tests/delete_regress/tests.py
{ "start": 14614, "end": 15122 }
class ____(TestCase): def test_set_querycount(self): policy = Policy.objects.create() version = Version.objects.create(policy=policy) location = Location.objects.create(version=version) Item.objects.create( version=version, location=location, location_value=location, ) # 2 UPDATEs for SET of item values and one for DELETE locations. with self.assertNumQueries(3): location.delete()
SetQueryCountTests
python
pypa__pip
src/pip/_vendor/rich/measure.py
{ "start": 262, "end": 5305 }
class ____(NamedTuple):
    """Stores the minimum and maximum widths (in characters) required to render an object."""

    minimum: int
    """Minimum number of cells required to render."""
    maximum: int
    """Maximum number of cells required to render."""

    @property
    def span(self) -> int:
        """Get difference between maximum and minimum."""
        return self.maximum - self.minimum

    def normalize(self) -> "Measurement":
        """Get measurement that ensures that minimum <= maximum and minimum >= 0

        Returns:
            Measurement: A normalized measurement.
        """
        minimum, maximum = self
        # Clamp minimum into [0, maximum] first, then guard both values again so
        # the result is always non-negative with minimum <= maximum.
        minimum = min(max(0, minimum), maximum)
        return Measurement(max(0, minimum), max(0, max(minimum, maximum)))

    def with_maximum(self, width: int) -> "Measurement":
        """Get a RenderableWith where the widths are <= width.

        Args:
            width (int): Maximum desired width.

        Returns:
            Measurement: New Measurement object.
        """
        minimum, maximum = self
        return Measurement(min(minimum, width), min(maximum, width))

    def with_minimum(self, width: int) -> "Measurement":
        """Get a RenderableWith where the widths are >= width.

        Args:
            width (int): Minimum desired width.

        Returns:
            Measurement: New Measurement object.
        """
        minimum, maximum = self
        width = max(0, width)
        return Measurement(max(minimum, width), max(maximum, width))

    def clamp(
        self, min_width: Optional[int] = None, max_width: Optional[int] = None
    ) -> "Measurement":
        """Clamp a measurement within the specified range.

        Args:
            min_width (int): Minimum desired width, or ``None`` for no minimum. Defaults to None.
            max_width (int): Maximum desired width, or ``None`` for no maximum. Defaults to None.

        Returns:
            Measurement: New Measurement object.
        """
        measurement = self
        if min_width is not None:
            measurement = measurement.with_minimum(min_width)
        if max_width is not None:
            measurement = measurement.with_maximum(max_width)
        return measurement

    @classmethod
    def get(
        cls, console: "Console", options: "ConsoleOptions", renderable: "RenderableType"
    ) -> "Measurement":
        """Get a measurement for a renderable.

        Args:
            console (~rich.console.Console): Console instance.
            options (~rich.console.ConsoleOptions): Console options.
            renderable (RenderableType): An object that may be rendered with Rich.

        Raises:
            errors.NotRenderableError: If the object is not renderable.

        Returns:
            Measurement: Measurement object containing range of character widths required to
                render the object.
        """
        _max_width = options.max_width
        if _max_width < 1:
            # No room at all; nothing can render.
            return Measurement(0, 0)
        if isinstance(renderable, str):
            renderable = console.render_str(
                renderable, markup=options.markup, highlight=False
            )
        renderable = rich_cast(renderable)
        if is_renderable(renderable):
            get_console_width: Optional[
                Callable[["Console", "ConsoleOptions"], "Measurement"]
            ] = getattr(renderable, "__rich_measure__", None)
            if get_console_width is not None:
                render_width = (
                    get_console_width(console, options)
                    .normalize()
                    .with_maximum(_max_width)
                )
                if render_width.maximum < 1:
                    return Measurement(0, 0)
                return render_width.normalize()
            else:
                # Renderable without __rich_measure__: assume it can flex from
                # zero up to the available width.
                return Measurement(0, _max_width)
        else:
            raise errors.NotRenderableError(
                f"Unable to get render width for {renderable!r}; "
                "a str, Segment, or object with __rich_console__ method is required"
            )


def measure_renderables(
    console: "Console",
    options: "ConsoleOptions",
    renderables: Sequence["RenderableType"],
) -> "Measurement":
    """Get a measurement that would fit a number of renderables.

    Args:
        console (~rich.console.Console): Console instance.
        options (~rich.console.ConsoleOptions): Console options.
        renderables (Iterable[RenderableType]): One or more renderable objects.

    Returns:
        Measurement: Measurement object containing range of character widths required to
            contain all given renderables.
    """
    if not renderables:
        return Measurement(0, 0)
    get_measurement = Measurement.get
    measurements = [
        get_measurement(console, options, renderable) for renderable in renderables
    ]
    # Combined fit: the widest minimum and the widest maximum over all items.
    measured_width = Measurement(
        max(measurements, key=itemgetter(0)).minimum,
        max(measurements, key=itemgetter(1)).maximum,
    )
    return measured_width
Measurement
python
google__pytype
pytype/state.py
{ "start": 7029, "end": 14576 }
class ____(utils.ContextWeakrefMixin):
  """An interpreter frame.

  This contains the local value and block stacks and the associated code and
  pointer. The most complex usage is with generators in which a frame is
  stored and then repeatedly reactivated. Other than that frames are created,
  executed and then discarded.

  Attributes:
    f_code: The code object this frame is executing.
    f_globals: The globals dict used for global name resolution.
    f_locals: The locals used for name resolution. Will be modified by
      Frame.__init__ if callargs is passed.
    f_builtins: Similar for builtins.
    f_back: The frame above self on the stack.
    f_lineno: The first line number of the code object.
    ctx: The abstract context we belong to.
    node: The node at which the frame is created.
    states: A mapping from opcodes to FrameState objects.
    cells: local variables bound in a closure, or used in a closure.
    block_stack: A stack of blocks used to manage exceptions, loops, and
      "with"s.
    data_stack: The value stack that is used for instruction operands.
    allowed_returns: The return annotation of this function.
    check_return: Whether the actual return type of a call should be checked
      against allowed_returns.
    return_variable: The return value of this function, as a Variable.
    yield_variable: The yield value of this function, as a Variable.
  """

  def __init__(
      self,
      node: cfg.CFGNode,
      ctx: _ContextType,
      f_code: blocks.OrderedCode,
      f_globals: abstract.LazyConcreteDict,
      f_locals: abstract.LazyConcreteDict,
      f_back: FrameType,
      callargs: dict[str, cfg.Variable],
      closure: tuple[cfg.Variable, ...] | None,
      func: cfg.Binding | None,
      first_arg: cfg.Variable | None,
      substs: Collection[dict[str, cfg.Variable]],
  ):
    """Initialize a special frame as needed by TypegraphVirtualMachine.

    Args:
      node: The current CFG graph node.
      ctx: The owning abstract context.
      f_code: The code object to execute in this frame.
      f_globals: The global context to execute in as a SimpleValue as used by
        TypegraphVirtualMachine.
      f_locals: Local variables. Will be modified if callargs is passed.
      f_back: The frame above this one on the stack.
      callargs: Additional function arguments to store in f_locals.
      closure: A tuple containing the new co_freevars.
      func: Optionally, a binding to the function this frame corresponds to.
      first_arg: First argument to the function.
      substs: Maps from type parameter names in scope for this frame to their
        possible values.

    Raises:
      NameError: If we can't resolve any references into the outer frame.
    """
    super().__init__(ctx)
    self.node = node
    self.current_opcode = None
    self.f_code = f_code
    self.states = {}
    self.f_globals = f_globals
    self.f_locals = f_locals
    self.f_back = f_back
    # Inherit builtins from the calling frame when available; otherwise
    # resolve "__builtins__" out of the globals (must yield exactly one
    # binding, hence the single-element unpack).
    if f_back and f_back.f_builtins:
      self.f_builtins = f_back.f_builtins
    else:
      _, bltin = self.ctx.attribute_handler.get_attribute(
          self.ctx.root_node, f_globals, "__builtins__"
      )
      (builtins_pu,) = bltin.bindings
      self.f_builtins = builtins_pu.data
    self.f_lineno = f_code.firstlineno
    # The first argument is used to make Python 3 super calls when super is not
    # passed any arguments.
    self.first_arg = first_arg
    self.allowed_returns = None
    self.check_return = False
    self.return_variable = self.ctx.program.NewVariable()
    self.yield_variable = self.ctx.program.NewVariable()
    # Keep track of the current opcode block and the block targets we add while
    # executing it; they can potentially be removed if the block returns early.
    self.current_block = None
    self.targets = collections.defaultdict(list)
    # A map from function name to @typing.overload-decorated signatures. The
    # overloads are copied to the implementation in InterpreterFunction.make.
    self.overloads = collections.defaultdict(list)
    # A closure g communicates with its outer function f through two
    # fields in CodeType (both of which are tuples of strings):
    #   f.co_cellvars: All f-local variables that are used in g (or any other
    #     closure).
    #   g.co_freevars: All variables from f that g uses.
    # Also, note that f.co_cellvars will only also be in f.co_varnames
    # if they are also parameters of f (because co_varnames[0:co_argcount] are
    # always the parameters), but won't otherwise.
    # Cells 0 .. num(cellvars)-1 : cellvar; num(cellvars) .. end : freevar
    self.closure = closure
    freevars = closure or []
    assert len(f_code.freevars) == len(freevars)
    # 3.11+ merged locals/cells/freevars into localsplus; earlier versions
    # keep cellvars separate.
    if self.ctx.python_version < (3, 11):
      cell_names = f_code.cellvars
    elif freevars:
      cell_names = f_code.localsplus[: -len(freevars)]
    else:
      cell_names = f_code.localsplus
    self.cells = [self.ctx.program.NewVariable() for _ in cell_names]
    self.cells.extend(freevars)
    if callargs:
      # Arguments that are also cellvars go into the cell slots; the rest are
      # written into f_locals.
      for name, value in sorted(callargs.items()):
        if name in f_code.cellvars:
          i = cell_names.index(name)
          self.cells[i].PasteVariable(value, node)
        else:
          self.ctx.attribute_handler.set_attribute(node, f_locals, name, value)
    # Python 3 supports calling 'super' without any arguments. In such a case
    # the implicit type argument is inserted into __build_class__'s cellvars
    # and propagated as a closure variable to all method/functions calling
    # 'super' without any arguments.
    # If this is a frame for the function called by __build_class__ (see
    # abstract.BuildClass), then we will store a reference to the variable
    # corresponding to the cellvar named "__class__" separately for convenient
    # access. After the class is built, abstract.BuildClass.call will add the
    # binding for the new class into this variable.
    self.class_closure_var = None
    if func and isinstance(func.data, abstract.InterpreterFunction):
      closure_name = abstract.BuildClass.CLOSURE_NAME
      if func.data.is_class_builder and closure_name in f_code.cellvars:
        self.class_closure_var = self.get_cell_by_name(closure_name)
    self.func = func
    self.substs = substs
    # Do not add to error tracebacks
    self.skip_in_tracebacks = False
    # Set the module name (used in logging)
    if f_code.filename:
      self.module_name = module_utils.path_to_module_name(f_code.filename)
    else:
      self.module_name = ""
    # All InterpreterFunction objects created while this frame was at the top of
    # the frame stack.
    self.functions_created_in_frame: dict[
        str, list[abstract.InterpreterFunction]
    ] = collections.defaultdict(list)

  def __repr__(self):  # pragma: no cover
    return "<Frame at 0x%08x: %r @ %d>" % (
        id(self),
        self.f_code.filename,
        self.f_lineno,
    )

  def copy_free_vars(self, n):
    # Copy the first n captured closure variables into the freevar slots,
    # which sit at the tail of self.cells (after the cellvar slots).
    offset = len(self.cells) - len(self.f_code.freevars)
    for i in range(n):
      self.cells[i + offset] = self.closure[i]

  @property
  def type_params(self):
    # All type parameter names in scope, flattened across the substs maps.
    return set(itertools.chain.from_iterable(self.substs))

  def lookup_name(self, target_name):
    # LEGB-style resolution: locals, then globals, then builtins; finally fall
    # back to closure cells.
    for store in (self.f_locals, self.f_globals, self.f_builtins):
      if store is not None and target_name in store.members:
        return store.members[target_name]
    return self.get_cell_by_name(target_name)

  def get_cell_by_name(self, name):
    return self.cells[self.f_code.get_cell_index(name)]
Frame
python
pypa__setuptools
setuptools/_vendor/jaraco/collections/__init__.py
{ "start": 9127, "end": 10882 }
class ____(dict):
    """
    A dict subclass that transforms the keys before they're used.

    Subclasses may override the default transform_key to customize behavior.
    """

    @staticmethod
    def transform_key(key):  # pragma: nocover
        # Identity transform by default; subclasses override.
        return key

    def __init__(self, *args, **kargs):
        super().__init__()
        # Funnel every initial item through __setitem__ so each key is
        # transformed exactly like later assignments.
        for key, value in dict(*args, **kargs).items():
            self[key] = value

    def __setitem__(self, key, val):
        super().__setitem__(self.transform_key(key), val)

    def __getitem__(self, key):
        return super().__getitem__(self.transform_key(key))

    def __contains__(self, key):
        return super().__contains__(self.transform_key(key))

    def __delitem__(self, key):
        super().__delitem__(self.transform_key(key))

    def get(self, key, *args, **kwargs):
        return super().get(self.transform_key(key), *args, **kwargs)

    def setdefault(self, key, *args, **kwargs):
        return super().setdefault(self.transform_key(key), *args, **kwargs)

    def pop(self, key, *args, **kwargs):
        return super().pop(self.transform_key(key), *args, **kwargs)

    def matching_key_for(self, key):
        """
        Given a key, return the actual key stored in self that matches.
        Raise KeyError if the key isn't found.
        """
        for stored_key in self.keys():
            if stored_key == key:
                return stored_key
        raise KeyError(key)
KeyTransformingDict
python
psf__black
tests/data/cases/dummy_implementations.py
{ "start": 3201, "end": 3251 }
class ____:
    """Stub class whose only method has a dummy (``...``) implementation."""

    def f(self): ... # Comment 2
ClassF
python
walkccc__LeetCode
solutions/3514. Number of Unique XOR Triplets II/3515.py
{ "start": 0, "end": 322 }
class ____:
    def uniqueXorTriplets(self, nums: list[int]) -> int:
        """Count distinct values of nums[i] ^ nums[j] ^ nums[k] for i <= j <= k."""
        n = len(nums)
        if n == 1:
            # The only triplet is (0, 0, 0); its XOR is nums[0], one value.
            return 1
        # XORs of every unordered pair of distinct indices. Pairs with i == j
        # need no separate handling: for any a < b, (nums[a] ^ nums[b]) ^ nums[b]
        # recovers nums[a], so single-element results are already reachable.
        pair_xors = set()
        for i in range(n):
            for j in range(i + 1, n):
                pair_xors.add(nums[i] ^ nums[j])
        # Fold in the third element.
        triplet_xors = set()
        for p in pair_xors:
            for v in nums:
                triplet_xors.add(p ^ v)
        return len(triplet_xors)
Solution
python
PrefectHQ__prefect
tests/events/server/test_in_memory_ordering.py
{ "start": 6210, "end": 8115 }
class ____:
    async def test_event_seen_tracking(
        self, causal_ordering: CausalOrdering, event_one: ReceivedEvent
    ):
        """record_event_as_seen marks an event as seen, queryable by object or by id."""
        # Initially not seen
        assert not await causal_ordering.event_has_been_seen(event_one)
        assert not await causal_ordering.event_has_been_seen(event_one.id)

        # Record as seen
        await causal_ordering.record_event_as_seen(event_one)
        assert await causal_ordering.event_has_been_seen(event_one)
        assert await causal_ordering.event_has_been_seen(event_one.id)

    async def test_seen_events_cleanup(
        self, causal_ordering: CausalOrdering, event_one: ReceivedEvent
    ):
        """Seen-event entries live in a bounded TTLCache and expire after the TTL."""
        # Record event as seen
        await causal_ordering.record_event_as_seen(event_one)
        assert await causal_ordering.event_has_been_seen(event_one)
        assert isinstance(causal_ordering._seen_events, TTLCache)
        assert causal_ordering._seen_events.ttl == SEEN_EXPIRATION.total_seconds()
        # Verify maxsize is reasonable (prevents unbounded growth)
        assert causal_ordering._seen_events.maxsize == 10000

        # Replace the cache temporarily with one that has a very short TTL
        original_cache = causal_ordering._seen_events
        try:
            # Create a TTLCache with 0.1 second TTL for testing
            causal_ordering._seen_events = TTLCache(maxsize=10000, ttl=0.1)

            # Add event to the short-lived cache
            await causal_ordering.record_event_as_seen(event_one)
            assert await causal_ordering.event_has_been_seen(event_one)

            # Wait for expiration
            await asyncio.sleep(0.15)

            # Should not be seen anymore due to expiration
            assert not await causal_ordering.event_has_been_seen(event_one)
        finally:
            # Restore original cache even if an assertion above fails.
            causal_ordering._seen_events = original_cache
TestEventSeenTracking
python
ray-project__ray
python/ray/_private/thirdparty/pynvml/pynvml.py
{ "start": 224365, "end": 243603 }
class ____(_PrintableStructure): _fields_ = [ ('version', c_uint), ('type', _nvmlClockType_t), ('pstate', _nvmlPstates_t), ('clockOffsetMHz', c_int), ('minClockOffsetMHz', c_int), ('maxClockOffsetMHz', c_int), ] nvmlClockOffset_v1 = 0x1000018 def nvmlDeviceGetClockOffsets(device, info): fn = _nvmlGetFunctionPointer("nvmlDeviceGetClockOffsets"); ret = fn(device, info) return NVML_SUCCESS def nvmlDeviceSetClockOffsets(device, info): fn = _nvmlGetFunctionPointer("nvmlDeviceSetClockOffsets"); ret = fn(device, info) return NVML_SUCCESS def nvmlDeviceGetSupportedPerformanceStates(device): pstates = [] c_count = c_uint(NVML_MAX_GPU_PERF_PSTATES) c_size = sizeof(c_uint)*c_count.value # NOTE: use 'c_uint' to represent the size of the nvmlPstate_t enumeration. pstates_array = _nvmlPstates_t * c_count.value c_pstates = pstates_array() fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedPerformanceStates") ret = fn(device, c_pstates, c_size) _nvmlCheckReturn(ret) for value in c_pstates: if value != NVML_PSTATE_UNKNOWN: pstates.append(value) return pstates def nvmlDeviceGetGpcClkVfOffset(device): offset = c_int32() fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpcClkVfOffset") ret = fn(device, byref(offset)) _nvmlCheckReturn(ret) return offset.value # Deprecated def nvmlDeviceSetGpcClkVfOffset(device, offset): c_offset = c_int32(offset) fn = _nvmlGetFunctionPointer("nvmlDeviceSetGpcClkVfOffset") ret = fn(device, c_offset) _nvmlCheckReturn(ret) return NVML_SUCCESS def nvmlDeviceGetGpcClkMinMaxVfOffset(device, minOffset=c_int(), maxOffset=c_int()): isReference = (type(minOffset) is not c_int) or (type(maxOffset) is not c_int) minOffsetRef = minOffset if isReference else byref(minOffset) maxOffsetRef = maxOffset if isReference else byref(maxOffset) fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpcClkMinMaxVfOffset") ret = fn(device, minOffsetRef, maxOffsetRef) _nvmlCheckReturn(ret) return NVML_SUCCESS if isReference else (minOffset.value, maxOffset.value) def 
nvmlDeviceGetMemClkVfOffset(device): offset = c_int32() fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemClkVfOffset") ret = fn(device, byref(offset)) _nvmlCheckReturn(ret) return offset.value # Deprecated def nvmlDeviceSetMemClkVfOffset(device, offset): c_offset = c_int32(offset) fn = _nvmlGetFunctionPointer("nvmlDeviceSetMemClkVfOffset") ret = fn(device, c_offset) _nvmlCheckReturn(ret) return NVML_SUCCESS def nvmlDeviceGetMemClkMinMaxVfOffset(device, minOffset=c_int(), maxOffset=c_int()): isReference = (type(minOffset) is not c_int) or (type(maxOffset) is not c_int) minOffsetRef = minOffset if isReference else byref(minOffset) maxOffsetRef = maxOffset if isReference else byref(maxOffset) fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemClkMinMaxVfOffset") ret = fn(device, minOffsetRef, maxOffsetRef) _nvmlCheckReturn(ret) return NVML_SUCCESS if isReference else (minOffset.value, maxOffset.value) def nvmlSystemSetConfComputeGpusReadyState(state): c_state = c_uint(state) fn = _nvmlGetFunctionPointer("nvmlSystemSetConfComputeGpusReadyState") ret = fn(c_state) _nvmlCheckReturn(ret) return NVML_SUCCESS def nvmlSystemGetConfComputeGpusReadyState(): c_state = c_uint() fn = _nvmlGetFunctionPointer("nvmlSystemGetConfComputeGpusReadyState") ret = fn(byref(c_state)) _nvmlCheckReturn(ret) return c_state.value def nvmlSystemGetConfComputeCapabilities(): c_ccSysCaps = c_nvmlConfComputeSystemCaps_t() fn = _nvmlGetFunctionPointer("nvmlSystemGetConfComputeCapabilities") ret = fn(byref(c_ccSysCaps)) _nvmlCheckReturn(ret) return c_ccSysCaps def nvmlSystemGetConfComputeState(): c_state = c_nvmlConfComputeSystemState_t() fn = _nvmlGetFunctionPointer("nvmlSystemGetConfComputeState") ret = fn(byref(c_state)) _nvmlCheckReturn(ret) return c_state def nvmlSystemGetConfComputeSettings(settings): fn = _nvmlGetFunctionPointer("nvmlSystemGetConfComputeSettings") return fn(settings) def nvmlDeviceSetConfComputeUnprotectedMemSize(device, c_ccMemSize): fn = 
_nvmlGetFunctionPointer("nvmlDeviceSetConfComputeUnprotectedMemSize") ret = fn(device, c_ccMemSize) _nvmlCheckReturn(ret) return NVML_SUCCESS def nvmlDeviceGetConfComputeMemSizeInfo(device): c_ccMemSize = c_nvmlConfComputeMemSizeInfo_t() fn = _nvmlGetFunctionPointer("nvmlDeviceGetConfComputeMemSizeInfo") ret = fn(device, byref(c_ccMemSize)) _nvmlCheckReturn(ret) return c_ccMemSize def nvmlDeviceGetConfComputeProtectedMemoryUsage(device): c_memory = c_nvmlMemory_t() fn = _nvmlGetFunctionPointer("nvmlDeviceGetConfComputeProtectedMemoryUsage") ret = fn(device, byref(c_memory)) _nvmlCheckReturn(ret) return c_memory def nvmlDeviceGetConfComputeGpuCertificate(device): c_cert = c_nvmlConfComputeGpuCertificate_t() fn = _nvmlGetFunctionPointer("nvmlDeviceGetConfComputeGpuCertificate") ret = fn(device, byref(c_cert)) _nvmlCheckReturn(ret) return c_cert def nvmlDeviceGetConfComputeGpuAttestationReport(device, c_nonce): c_attestReport = c_nvmlConfComputeGpuAttestationReport_t() c_nonce_arr = (c_uint8 * len(c_nonce))(*(c_nonce)) setattr(c_attestReport, 'nonce', c_nonce_arr) fn = _nvmlGetFunctionPointer("nvmlDeviceGetConfComputeGpuAttestationReport") ret = fn(device, byref(c_attestReport)) _nvmlCheckReturn(ret) return c_attestReport def nvmlSystemSetConfComputeKeyRotationThresholdInfo(max_atk_adv): c_keyRotationThrInfo = c_nvmlConfComputeSetKeyRotationThresholdInfo_t(0) c_keyRotationThrInfo.version = ConfComputeSetKeyRotationThresholdInfo_v1 c_keyRotationThrInfo.maxAttackerAdvantage = max_atk_adv fn = _nvmlGetFunctionPointer("nvmlSystemSetConfComputeKeyRotationThresholdInfo") ret = fn(byref(c_keyRotationThrInfo)) _nvmlCheckReturn(ret) return NVML_SUCCESS def nvmlSystemGetConfComputeKeyRotationThresholdInfo(): c_keyRotationThrInfo = c_nvmlConfComputeGetKeyRotationThresholdInfo_t(0) c_keyRotationThrInfo.version = ConfComputeGetKeyRotationThresholdInfo_v1 fn = _nvmlGetFunctionPointer("nvmlSystemGetConfComputeKeyRotationThresholdInfo") ret = fn(byref(c_keyRotationThrInfo)) 
_nvmlCheckReturn(ret) return c_keyRotationThrInfo ## GPM ## ######### ## Enums/defines #### GPM Metric Identifiers NVML_GPM_METRIC_GRAPHICS_UTIL = 1 # Percentage of time any compute/graphics app was active on the GPU. 0.0 - 100.0 NVML_GPM_METRIC_SM_UTIL = 2 # Percentage of SMs that were busy. 0.0 - 100.0 NVML_GPM_METRIC_SM_OCCUPANCY = 3 # Percentage of warps that were active vs theoretical maximum. 0.0 - 100.0 NVML_GPM_METRIC_INTEGER_UTIL = 4 # Percentage of time the GPU's SMs were doing integer operations. 0.0 - 100.0 NVML_GPM_METRIC_ANY_TENSOR_UTIL = 5 # Percentage of time the GPU's SMs were doing ANY tensor operations. 0.0 - 100.0 NVML_GPM_METRIC_DFMA_TENSOR_UTIL = 6 # Percentage of time the GPU's SMs were doing DFMA tensor operations. 0.0 - 100.0 NVML_GPM_METRIC_HMMA_TENSOR_UTIL = 7 # Percentage of time the GPU's SMs were doing HMMA tensor operations. 0.0 - 100.0 NVML_GPM_METRIC_IMMA_TENSOR_UTIL = 9 # Percentage of time the GPU's SMs were doing IMMA tensor operations. 0.0 - 100.0 NVML_GPM_METRIC_DRAM_BW_UTIL = 10 # Percentage of DRAM bw used vs theoretical maximum. 0.0 - 100.0 NVML_GPM_METRIC_FP64_UTIL = 11 # Percentage of time the GPU's SMs were doing non-tensor FP64 math. 0.0 - 100.0 NVML_GPM_METRIC_FP32_UTIL = 12 # Percentage of time the GPU's SMs were doing non-tensor FP32 math. 0.0 - 100.0 NVML_GPM_METRIC_FP16_UTIL = 13 # Percentage of time the GPU's SMs were doing non-tensor FP16 math. 0.0 - 100.0 NVML_GPM_METRIC_PCIE_TX_PER_SEC = 20 # PCIe traffic from this GPU in MiB/sec NVML_GPM_METRIC_PCIE_RX_PER_SEC = 21 # PCIe traffic to this GPU in MiB/sec NVML_GPM_METRIC_NVDEC_0_UTIL = 30 # Percent utilization of NVDEC 0. 0.0 - 100.0 NVML_GPM_METRIC_NVDEC_1_UTIL = 31 # Percent utilization of NVDEC 1. 0.0 - 100.0 NVML_GPM_METRIC_NVDEC_2_UTIL = 32 # Percent utilization of NVDEC 2. 0.0 - 100.0 NVML_GPM_METRIC_NVDEC_3_UTIL = 33 # Percent utilization of NVDEC 3. 0.0 - 100.0 NVML_GPM_METRIC_NVDEC_4_UTIL = 34 # Percent utilization of NVDEC 4. 
0.0 - 100.0 NVML_GPM_METRIC_NVDEC_5_UTIL = 35 # Percent utilization of NVDEC 5. 0.0 - 100.0 NVML_GPM_METRIC_NVDEC_6_UTIL = 36 # Percent utilization of NVDEC 6. 0.0 - 100.0 NVML_GPM_METRIC_NVDEC_7_UTIL = 37 # Percent utilization of NVDEC 7. 0.0 - 100.0 NVML_GPM_METRIC_NVJPG_0_UTIL = 40 # Percent utilization of NVJPG 0. 0.0 - 100.0 NVML_GPM_METRIC_NVJPG_1_UTIL = 41 # Percent utilization of NVJPG 1. 0.0 - 100.0 NVML_GPM_METRIC_NVJPG_2_UTIL = 42 # Percent utilization of NVJPG 2. 0.0 - 100.0 NVML_GPM_METRIC_NVJPG_3_UTIL = 43 # Percent utilization of NVJPG 3. 0.0 - 100.0 NVML_GPM_METRIC_NVJPG_4_UTIL = 44 # Percent utilization of NVJPG 4. 0.0 - 100.0 NVML_GPM_METRIC_NVJPG_5_UTIL = 45 # Percent utilization of NVJPG 5. 0.0 - 100.0 NVML_GPM_METRIC_NVJPG_6_UTIL = 46 # Percent utilization of NVJPG 6. 0.0 - 100.0 NVML_GPM_METRIC_NVJPG_7_UTIL = 47 # Percent utilization of NVJPG 7. 0.0 - 100.0 NVML_GPM_METRIC_NVOFA_0_UTIL = 50 # Percent utilization of NVOFA 0. 0.0 - 100.0 NVML_GPM_METRIC_NVOFA_1_UTIL = 51 # Percent utilization of NVOFA 1. 
0.0 - 100.0 NVML_GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC = 60 # NvLink read bandwidth for all links in MiB/sec NVML_GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC = 61 # NvLink write bandwidth for all links in MiB/sec NVML_GPM_METRIC_NVLINK_L0_RX_PER_SEC = 62 # NvLink read bandwidth for link 0 in MiB/sec NVML_GPM_METRIC_NVLINK_L0_TX_PER_SEC = 63 # NvLink write bandwidth for link 0 in MiB/sec NVML_GPM_METRIC_NVLINK_L1_RX_PER_SEC = 64 # NvLink read bandwidth for link 1 in MiB/sec NVML_GPM_METRIC_NVLINK_L1_TX_PER_SEC = 65 # NvLink write bandwidth for link 1 in MiB/sec NVML_GPM_METRIC_NVLINK_L2_RX_PER_SEC = 66 # NvLink read bandwidth for link 2 in MiB/sec NVML_GPM_METRIC_NVLINK_L2_TX_PER_SEC = 67 # NvLink write bandwidth for link 2 in MiB/sec NVML_GPM_METRIC_NVLINK_L3_RX_PER_SEC = 68 # NvLink read bandwidth for link 3 in MiB/sec NVML_GPM_METRIC_NVLINK_L3_TX_PER_SEC = 69 # NvLink write bandwidth for link 3 in MiB/sec NVML_GPM_METRIC_NVLINK_L4_RX_PER_SEC = 70 # NvLink read bandwidth for link 4 in MiB/sec NVML_GPM_METRIC_NVLINK_L4_TX_PER_SEC = 71 # NvLink write bandwidth for link 4 in MiB/sec NVML_GPM_METRIC_NVLINK_L5_RX_PER_SEC = 72 # NvLink read bandwidth for link 5 in MiB/sec NVML_GPM_METRIC_NVLINK_L5_TX_PER_SEC = 73 # NvLink write bandwidth for link 5 in MiB/sec NVML_GPM_METRIC_NVLINK_L6_RX_PER_SEC = 74 # NvLink read bandwidth for link 6 in MiB/sec NVML_GPM_METRIC_NVLINK_L6_TX_PER_SEC = 75 # NvLink write bandwidth for link 6 in MiB/sec NVML_GPM_METRIC_NVLINK_L7_RX_PER_SEC = 76 # NvLink read bandwidth for link 7 in MiB/sec NVML_GPM_METRIC_NVLINK_L7_TX_PER_SEC = 77 # NvLink write bandwidth for link 7 in MiB/sec NVML_GPM_METRIC_NVLINK_L8_RX_PER_SEC = 78 # NvLink read bandwidth for link 8 in MiB/sec NVML_GPM_METRIC_NVLINK_L8_TX_PER_SEC = 79 # NvLink write bandwidth for link 8 in MiB/sec NVML_GPM_METRIC_NVLINK_L9_RX_PER_SEC = 80 # NvLink read bandwidth for link 9 in MiB/sec NVML_GPM_METRIC_NVLINK_L9_TX_PER_SEC = 81 # NvLink write bandwidth for link 9 in MiB/sec 
NVML_GPM_METRIC_NVLINK_L10_RX_PER_SEC = 82 # NvLink read bandwidth for link 10 in MiB/sec NVML_GPM_METRIC_NVLINK_L10_TX_PER_SEC = 83 # NvLink write bandwidth for link 10 in MiB/sec NVML_GPM_METRIC_NVLINK_L11_RX_PER_SEC = 84 # NvLink read bandwidth for link 11 in MiB/sec NVML_GPM_METRIC_NVLINK_L11_TX_PER_SEC = 85 # NvLink write bandwidth for link 11 in MiB/sec NVML_GPM_METRIC_NVLINK_L12_RX_PER_SEC = 86 # NvLink read bandwidth for link 12 in MiB/sec NVML_GPM_METRIC_NVLINK_L12_TX_PER_SEC = 87 # NvLink write bandwidth for link 12 in MiB/sec NVML_GPM_METRIC_NVLINK_L13_RX_PER_SEC = 88 # NvLink read bandwidth for link 13 in MiB/sec NVML_GPM_METRIC_NVLINK_L13_TX_PER_SEC = 89 # NvLink write bandwidth for link 13 in MiB/sec NVML_GPM_METRIC_NVLINK_L14_RX_PER_SEC = 90 # NvLink read bandwidth for link 14 in MiB/sec NVML_GPM_METRIC_NVLINK_L14_TX_PER_SEC = 91 # NvLink write bandwidth for link 14 in MiB/sec NVML_GPM_METRIC_NVLINK_L15_RX_PER_SEC = 92 # NvLink read bandwidth for link 15 in MiB/sec NVML_GPM_METRIC_NVLINK_L15_TX_PER_SEC = 93 # NvLink write bandwidth for link 15 in MiB/sec NVML_GPM_METRIC_NVLINK_L16_RX_PER_SEC = 94 # NvLink read bandwidth for link 16 in MiB/sec NVML_GPM_METRIC_NVLINK_L16_TX_PER_SEC = 95 # NvLink write bandwidth for link 16 in MiB/sec NVML_GPM_METRIC_NVLINK_L17_RX_PER_SEC = 96 # NvLink read bandwidth for link 17 in MiB/sec NVML_GPM_METRIC_NVLINK_L17_TX_PER_SEC = 97 # NvLink write bandwidth for link 17 in MiB/sec NVML_GPM_METRIC_C2C_TOTAL_TX_PER_SEC = 100 NVML_GPM_METRIC_C2C_TOTAL_RX_PER_SEC = 101 NVML_GPM_METRIC_C2C_DATA_TX_PER_SEC = 102 NVML_GPM_METRIC_C2C_DATA_RX_PER_SEC = 103 NVML_GPM_METRIC_C2C_LINK0_TOTAL_TX_PER_SEC = 104 NVML_GPM_METRIC_C2C_LINK0_TOTAL_RX_PER_SEC = 105 NVML_GPM_METRIC_C2C_LINK0_DATA_TX_PER_SEC = 106 NVML_GPM_METRIC_C2C_LINK0_DATA_RX_PER_SEC = 107 NVML_GPM_METRIC_C2C_LINK1_TOTAL_TX_PER_SEC = 108 NVML_GPM_METRIC_C2C_LINK1_TOTAL_RX_PER_SEC = 109 NVML_GPM_METRIC_C2C_LINK1_DATA_TX_PER_SEC = 110 NVML_GPM_METRIC_C2C_LINK1_DATA_RX_PER_SEC 
= 111 NVML_GPM_METRIC_C2C_LINK2_TOTAL_TX_PER_SEC = 112 NVML_GPM_METRIC_C2C_LINK2_TOTAL_RX_PER_SEC = 113 NVML_GPM_METRIC_C2C_LINK2_DATA_TX_PER_SEC = 114 NVML_GPM_METRIC_C2C_LINK2_DATA_RX_PER_SEC = 115 NVML_GPM_METRIC_C2C_LINK3_TOTAL_TX_PER_SEC = 116 NVML_GPM_METRIC_C2C_LINK3_TOTAL_RX_PER_SEC = 117 NVML_GPM_METRIC_C2C_LINK3_DATA_TX_PER_SEC = 118 NVML_GPM_METRIC_C2C_LINK3_DATA_RX_PER_SEC = 119 NVML_GPM_METRIC_C2C_LINK4_TOTAL_TX_PER_SEC = 120 NVML_GPM_METRIC_C2C_LINK4_TOTAL_RX_PER_SEC = 121 NVML_GPM_METRIC_C2C_LINK4_DATA_TX_PER_SEC = 122 NVML_GPM_METRIC_C2C_LINK4_DATA_RX_PER_SEC = 123 NVML_GPM_METRIC_C2C_LINK5_TOTAL_TX_PER_SEC = 124 NVML_GPM_METRIC_C2C_LINK5_TOTAL_RX_PER_SEC = 125 NVML_GPM_METRIC_C2C_LINK5_DATA_TX_PER_SEC = 126 NVML_GPM_METRIC_C2C_LINK5_DATA_RX_PER_SEC = 127 NVML_GPM_METRIC_C2C_LINK6_TOTAL_TX_PER_SEC = 128 NVML_GPM_METRIC_C2C_LINK6_TOTAL_RX_PER_SEC = 129 NVML_GPM_METRIC_C2C_LINK6_DATA_TX_PER_SEC = 130 NVML_GPM_METRIC_C2C_LINK6_DATA_RX_PER_SEC = 131 NVML_GPM_METRIC_C2C_LINK7_TOTAL_TX_PER_SEC = 132 NVML_GPM_METRIC_C2C_LINK7_TOTAL_RX_PER_SEC = 133 NVML_GPM_METRIC_C2C_LINK7_DATA_TX_PER_SEC = 134 NVML_GPM_METRIC_C2C_LINK7_DATA_RX_PER_SEC = 135 NVML_GPM_METRIC_C2C_LINK8_TOTAL_TX_PER_SEC = 136 NVML_GPM_METRIC_C2C_LINK8_TOTAL_RX_PER_SEC = 137 NVML_GPM_METRIC_C2C_LINK8_DATA_TX_PER_SEC = 138 NVML_GPM_METRIC_C2C_LINK8_DATA_RX_PER_SEC = 139 NVML_GPM_METRIC_C2C_LINK9_TOTAL_TX_PER_SEC = 140 NVML_GPM_METRIC_C2C_LINK9_TOTAL_RX_PER_SEC = 141 NVML_GPM_METRIC_C2C_LINK9_DATA_TX_PER_SEC = 142 NVML_GPM_METRIC_C2C_LINK9_DATA_RX_PER_SEC = 143 NVML_GPM_METRIC_C2C_LINK10_TOTAL_TX_PER_SEC = 144 NVML_GPM_METRIC_C2C_LINK10_TOTAL_RX_PER_SEC = 145 NVML_GPM_METRIC_C2C_LINK10_DATA_TX_PER_SEC = 146 NVML_GPM_METRIC_C2C_LINK10_DATA_RX_PER_SEC = 147 NVML_GPM_METRIC_C2C_LINK11_TOTAL_TX_PER_SEC = 148 NVML_GPM_METRIC_C2C_LINK11_TOTAL_RX_PER_SEC = 149 NVML_GPM_METRIC_C2C_LINK11_DATA_TX_PER_SEC = 150 NVML_GPM_METRIC_C2C_LINK11_DATA_RX_PER_SEC = 151 NVML_GPM_METRIC_C2C_LINK12_TOTAL_TX_PER_SEC = 
152 NVML_GPM_METRIC_C2C_LINK12_TOTAL_RX_PER_SEC = 153 NVML_GPM_METRIC_C2C_LINK12_DATA_TX_PER_SEC = 154 NVML_GPM_METRIC_C2C_LINK12_DATA_RX_PER_SEC = 155 NVML_GPM_METRIC_C2C_LINK13_TOTAL_TX_PER_SEC = 156 NVML_GPM_METRIC_C2C_LINK13_TOTAL_RX_PER_SEC = 157 NVML_GPM_METRIC_C2C_LINK13_DATA_TX_PER_SEC = 158 NVML_GPM_METRIC_C2C_LINK13_DATA_RX_PER_SEC = 159 NVML_GPM_METRIC_HOSTMEM_CACHE_HIT = 160 NVML_GPM_METRIC_HOSTMEM_CACHE_MISS = 161 NVML_GPM_METRIC_PEERMEM_CACHE_HIT = 162 NVML_GPM_METRIC_PEERMEM_CACHE_MISS = 163 NVML_GPM_METRIC_DRAM_CACHE_HIT = 164 NVML_GPM_METRIC_DRAM_CACHE_MISS = 165 NVML_GPM_METRIC_NVENC_0_UTIL = 166, NVML_GPM_METRIC_NVENC_1_UTIL = 167, NVML_GPM_METRIC_NVENC_2_UTIL = 168, NVML_GPM_METRIC_NVENC_3_UTIL = 169, NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ELAPSED = 170, NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ACTIVE = 171, NVML_GPM_METRIC_GR0_CTXSW_REQUESTS = 172, NVML_GPM_METRIC_GR0_CTXSW_CYCLES_PER_REQ = 173, NVML_GPM_METRIC_GR0_CTXSW_ACTIVE_PCT = 174, NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ELAPSED = 175, NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ACTIVE = 176, NVML_GPM_METRIC_GR1_CTXSW_REQUESTS = 177, NVML_GPM_METRIC_GR1_CTXSW_CYCLES_PER_REQ = 178, NVML_GPM_METRIC_GR1_CTXSW_ACTIVE_PCT = 179, NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ELAPSED = 180, NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ACTIVE = 181, NVML_GPM_METRIC_GR2_CTXSW_REQUESTS = 182, NVML_GPM_METRIC_GR2_CTXSW_CYCLES_PER_REQ = 183, NVML_GPM_METRIC_GR2_CTXSW_ACTIVE_PCT = 184, NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ELAPSED = 185, NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ACTIVE = 186, NVML_GPM_METRIC_GR3_CTXSW_REQUESTS = 187, NVML_GPM_METRIC_GR3_CTXSW_CYCLES_PER_REQ = 188, NVML_GPM_METRIC_GR3_CTXSW_ACTIVE_PCT = 189, NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ELAPSED = 190, NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ACTIVE = 191, NVML_GPM_METRIC_GR4_CTXSW_REQUESTS = 192, NVML_GPM_METRIC_GR4_CTXSW_CYCLES_PER_REQ = 193, NVML_GPM_METRIC_GR4_CTXSW_ACTIVE_PCT = 194, NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ELAPSED = 195, NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ACTIVE = 196, 
NVML_GPM_METRIC_GR5_CTXSW_REQUESTS = 197, NVML_GPM_METRIC_GR5_CTXSW_CYCLES_PER_REQ = 198, NVML_GPM_METRIC_GR5_CTXSW_ACTIVE_PCT = 199, NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ELAPSED = 200, NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ACTIVE = 201, NVML_GPM_METRIC_GR6_CTXSW_REQUESTS = 202, NVML_GPM_METRIC_GR6_CTXSW_CYCLES_PER_REQ = 203, NVML_GPM_METRIC_GR6_CTXSW_ACTIVE_PCT = 204, NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ELAPSED = 205, NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ACTIVE = 206, NVML_GPM_METRIC_GR7_CTXSW_REQUESTS = 207, NVML_GPM_METRIC_GR7_CTXSW_CYCLES_PER_REQ = 208, NVML_GPM_METRIC_GR7_CTXSW_ACTIVE_PCT = 209, NVML_GPM_METRIC_MAX = 210 ## Structs
c_nvmlClockOffset_t
python
ray-project__ray
python/ray/dag/dag_node.py
{ "start": 754, "end": 29422 }
class ____(DAGNodeBase): """Abstract class for a node in a Ray task graph. A node has a type (e.g., FunctionNode), data (e.g., function options and body), arguments (Python values, DAGNodes, and DAGNodes nested within Python argument values) and options (Ray API .options() used for function, class or class method) """ def __init__( self, args: Tuple[Any], kwargs: Dict[str, Any], options: Dict[str, Any], other_args_to_resolve: Dict[str, Any], ): """ args: args (Tuple[Any]): Bound node arguments. ex: func_or_class.bind(1) kwargs (Dict[str, Any]): Bound node keyword arguments. ex: func_or_class.bind(a=1) options (Dict[str, Any]): Bound node options arguments. ex: func_or_class.options(num_cpus=2) other_args_to_resolve (Dict[str, Any]): Bound kwargs to resolve that's specific to subclass implementation without exposing as args in base class, example: ClassMethodNode """ self._bound_args: Tuple[Any] = args or [] self._bound_kwargs: Dict[str, Any] = kwargs or {} self._bound_options: Dict[str, Any] = options or {} self._bound_other_args_to_resolve: Optional[Dict[str, Any]] = ( other_args_to_resolve or {} ) # The list of nodes that use this DAG node as an argument. self._downstream_nodes: List["DAGNode"] = [] # UUID that is not changed over copies of this node. self._stable_uuid = uuid.uuid4().hex # Indicates whether this DAG node contains nested DAG nodes. # Nested DAG nodes are allowed in traditional DAGs but not # in Ray Compiled Graphs, except for MultiOutputNode. self._args_contain_nested_dag_node = False # The list of nodes that this DAG node uses as an argument. self._upstream_nodes: List["DAGNode"] = self._collect_upstream_nodes() # Cached values from last call to execute() self.cache_from_last_execute = {} self._type_hint: ChannelOutputType = ChannelOutputType() # If the original type hint is an AutoTransportType, we make a copy # here when it is resolved to the actual type, as additional debugging # information. Otherwise, it is None. 
self._original_type_hint: Optional[ChannelOutputType] = None # Whether this node calls `experimental_compile`. self.is_cgraph_output_node = False def _collect_upstream_nodes(self) -> List["DAGNode"]: """ Retrieve upstream nodes and update their downstream dependencies. Currently, the DAG assumes that all DAGNodes in `args`, `kwargs`, and `other_args_to_resolve` are upstream nodes. However, Ray Compiled Graphs builds the upstream/downstream relationship based only on args. Be cautious when persisting DAGNodes in `other_args_to_resolve` and kwargs in the future. TODO (kevin85421): Currently, the upstream nodes and downstream nodes have circular references. Therefore, it relies on the garbage collector to clean them up instead of reference counting. We should consider using weak references to avoid circular references. """ upstream_nodes: List["DAGNode"] = [] # Ray Compiled Graphs do not allow nested DAG nodes in arguments. # Specifically, a DAGNode should not be placed inside any type of # container. However, we only know if this is a compiled graph # when calling `experimental_compile`. Therefore, we need to check # in advance if the arguments contain nested DAG nodes and raise # an error after compilation. assert hasattr(self._bound_args, "__iter__") for arg in self._bound_args: if isinstance(arg, DAGNode): upstream_nodes.append(arg) else: scanner = _PyObjScanner() dag_nodes = scanner.find_nodes(arg) upstream_nodes.extend(dag_nodes) scanner.clear() self._args_contain_nested_dag_node = len(dag_nodes) > 0 scanner = _PyObjScanner() other_upstream_nodes: List["DAGNode"] = scanner.find_nodes( [ self._bound_kwargs, self._bound_other_args_to_resolve, ] ) upstream_nodes.extend(other_upstream_nodes) scanner.clear() # Update dependencies. 
for upstream_node in upstream_nodes: upstream_node._downstream_nodes.append(self) return upstream_nodes def with_tensor_transport( self, transport: Optional[Union[str, Communicator]] = "auto", device: Literal["default", "cpu", "gpu", "cuda"] = "default", _static_shape: bool = False, _direct_return: bool = False, ): """ Configure the torch tensor transport for this node. Args: transport: Specifies the tensor transport mechanism. - "accelerator": Tensors are communicated using accelerator-specific backends (e.g., NCCL, XLA, or vendor-provided transport). This is the recommended option for most use cases, as it supports extensibility and future hardware backends. - "nccl": Tensors are passed explicitly via NCCL. This option is kept for backwards compatibility and may be removed in the future. Use "accelerator" instead unless you have legacy requirements. - "shm": Tensors are passed via host shared memory and gRPC. Typically used when accelerator-based transport is unavailable or not suitable. - "auto" (default): The system automatically selects the appropriate transport mechanism based on the sender and receiver, usually preferring accelerator-based transport when available. device: The target device to use for the tensor transport. "default": The tensor will maintain its original device placement from the sender "cpu": The tensor will be explicitly moved to CPU device in the receiver "gpu" or "cuda": The tensor will be explicitly moved to GPU device in the receiver _static_shape: A hint indicating whether the shape(s) and dtype(s) of tensor(s) contained in this value always remain the same across different executions of the DAG. If this is True, the transport will be more efficient. _direct_return: Whether the tensor is sent directly or inside of other data. If a "nccl" transport is used, this allows the sender and receiver to eliminate performance overhead from an additional data transfer. 
""" try: device = Device(device) except ValueError: valid_devices = ", ".join(f"'{d.value}'" for d in Device) raise ValueError( f"Invalid device '{device}'. Valid options are: {valid_devices}." ) if transport == "auto": self._type_hint = AutoTransportType( device=device, _static_shape=_static_shape, _direct_return=_direct_return, ) elif transport == "nccl": self._type_hint = TorchTensorType( transport="accelerator", device=device, _static_shape=_static_shape, _direct_return=_direct_return, ) elif transport == "accelerator": self._type_hint = TorchTensorType( transport="accelerator", device=device, _static_shape=_static_shape, _direct_return=_direct_return, ) elif transport == "shm": self._type_hint = TorchTensorType( device=device, _static_shape=_static_shape, _direct_return=_direct_return, ) else: if not isinstance(transport, Communicator): raise ValueError( f"Invalid transport type: {transport}. " "Transport must be one of 'auto', 'nccl', 'shm', 'accelerator' or " "an instance of Communicator type." 
) self._type_hint = TorchTensorType( transport=transport, device=device, _static_shape=_static_shape, _direct_return=_direct_return, ) return self @property def type_hint(self) -> ChannelOutputType: return self._type_hint @type_hint.setter def type_hint(self, type_hint: ChannelOutputType) -> None: if isinstance(self._type_hint, AutoTransportType): self._original_type_hint = self._type_hint self._type_hint = type_hint def get_args(self) -> Tuple[Any]: """Return the tuple of arguments for this node.""" return self._bound_args def get_kwargs(self) -> Dict[str, Any]: """Return the dict of keyword arguments for this node.""" return self._bound_kwargs.copy() def get_options(self) -> Dict[str, Any]: """Return the dict of options arguments for this node.""" return self._bound_options.copy() def get_other_args_to_resolve(self) -> Dict[str, Any]: """Return the dict of other args to resolve arguments for this node.""" return self._bound_other_args_to_resolve.copy() def get_stable_uuid(self) -> str: """Return stable uuid for this node. 1) Generated only once at first instance creation 2) Stable across pickling, replacement and JSON serialization. """ return self._stable_uuid async def get_object_refs_from_last_execute(self) -> Dict[str, Any]: """Gets cached object refs from the last call to execute(). After this DAG is executed through execute(), retrieves a map between node UUID to a reference to the return value of the default executor on that node. 
""" cache = {} for node_uuid, value in self.cache_from_last_execute.items(): if isinstance(value, asyncio.Task): cache[node_uuid] = await value else: cache[node_uuid] = value return cache def clear_cache(self): self.cache_from_last_execute = {} def experimental_compile( self, _submit_timeout: Optional[float] = None, _buffer_size_bytes: Optional[int] = None, enable_asyncio: bool = False, _max_inflight_executions: Optional[int] = None, _max_buffered_results: Optional[int] = None, _overlap_gpu_communication: Optional[bool] = None, _default_communicator: Optional[Union[Communicator, str]] = "create", ) -> "ray.dag.CompiledDAG": """Compile an accelerated execution path for this DAG. Args: _submit_timeout: The maximum time in seconds to wait for execute() calls. None means using default timeout, 0 means immediate timeout (immediate success or timeout without blocking), -1 means infinite timeout (block indefinitely). _buffer_size_bytes: The initial buffer size in bytes for messages that can be passed between tasks in the DAG. The buffers will be automatically resized if larger messages are written to the channel. enable_asyncio: Whether to enable asyncio for this DAG. _max_inflight_executions: The maximum number of in-flight executions that can be submitted via `execute` or `execute_async` before consuming the output using `ray.get()`. If the caller submits more executions, `RayCgraphCapacityExceeded` is raised. _max_buffered_results: The maximum number of results that can be buffered at the driver. If more than this number of results are buffered, `RayCgraphCapacityExceeded` is raised. Note that when result corresponding to an execution is retrieved (by calling `ray.get()` on a `CompiledDAGRef` or `CompiledDAGRef` or await on a `CompiledDAGFuture`), results corresponding to earlier executions that have not been retrieved yet are buffered. _overlap_gpu_communication: (experimental) Whether to overlap GPU communication with computation during DAG execution. 
If True, the communication and computation can be overlapped, which can improve the performance of the DAG execution. If None, the default value will be used. _default_communicator: The default communicator to use to transfer tensors. Three types of values are valid. (1) Communicator: For p2p operations, this is the default communicator to use for nodes annotated with `with_tensor_transport()` and when shared memory is not the desired option (e.g., when transport="nccl", or when transport="auto" for communication between two different GPUs). For collective operations, this is the default communicator to use when a custom communicator is not specified. (2) "create": for each collective operation without a custom communicator specified, a communicator is created and initialized on its involved actors, or an already created communicator is reused if the set of actors is the same. For all p2p operations without a custom communicator specified, it reuses an already created collective communicator if the p2p actors are a subset. Otherwise, a new communicator is created. (3) None: a ValueError will be thrown if a custom communicator is not specified. Returns: A compiled DAG. """ from ray.dag import DAGContext ctx = DAGContext.get_current() if _buffer_size_bytes is None: _buffer_size_bytes = ctx.buffer_size_bytes # Validate whether this DAG node has already been compiled. if self.is_cgraph_output_node: raise ValueError( "It is not allowed to call `experimental_compile` on the same DAG " "object multiple times no matter whether `teardown` is called or not. " "Please reuse the existing compiled DAG or create a new one." ) # Whether this node is an output node in the DAG. We cannot determine # this in the constructor because the output node is determined when # `experimental_compile` is called. 
self.is_cgraph_output_node = True return build_compiled_dag_from_ray_dag( self, _submit_timeout, _buffer_size_bytes, enable_asyncio, _max_inflight_executions, _max_buffered_results, _overlap_gpu_communication, _default_communicator, ) def execute( self, *args, _ray_cache_refs: bool = False, **kwargs ) -> Union[ray.ObjectRef, "ray.actor.ActorHandle"]: """Execute this DAG using the Ray default executor _execute_impl(). Args: _ray_cache_refs: If true, stores the default executor's return values on each node in this DAG in a cache. These should be a mix of: - ray.ObjectRefs pointing to the outputs of method and function nodes - Serve handles for class nodes - resolved values representing user input at runtime """ def executor(node): return node._execute_impl(*args, **kwargs) result = self.apply_recursive(executor) if _ray_cache_refs: self.cache_from_last_execute = executor.cache return result def _get_toplevel_child_nodes(self) -> List["DAGNode"]: """Return the list of nodes specified as top-level args. For example, in `f.remote(a, [b])`, only `a` is a top-level arg. This list of nodes are those that are typically resolved prior to task execution in Ray. This does not include nodes nested within args. For that, use ``_get_all_child_nodes()``. """ # we use List instead of Set here because the hash key of the node # object changes each time we create it. So if using Set here, the # order of returned children can be different if we create the same # nodes and dag one more time. 
children = [] for a in self.get_args(): if isinstance(a, DAGNode): if a not in children: children.append(a) for a in self.get_kwargs().values(): if isinstance(a, DAGNode): if a not in children: children.append(a) for a in self.get_other_args_to_resolve().values(): if isinstance(a, DAGNode): if a not in children: children.append(a) return children def _get_all_child_nodes(self) -> List["DAGNode"]: """Return the list of nodes referenced by the args, kwargs, and args_to_resolve in current node, even they're deeply nested. Examples: f.remote(a, [b]) -> [a, b] f.remote(a, [b], key={"nested": [c]}) -> [a, b, c] """ scanner = _PyObjScanner() # we use List instead of Set here, reason explained # in `_get_toplevel_child_nodes`. children = [] for n in scanner.find_nodes( [ self._bound_args, self._bound_kwargs, self._bound_other_args_to_resolve, ] ): if n not in children: children.append(n) scanner.clear() return children def _apply_and_replace_all_child_nodes( self, fn: "Callable[[DAGNode], T]" ) -> "DAGNode": """Apply and replace all immediate child nodes using a given function. This is a shallow replacement only. To recursively transform nodes in the DAG, use ``apply_recursive()``. Args: fn: Callable that will be applied once to each child of this node. Returns: New DAGNode after replacing all child nodes. """ replace_table = {} # CloudPickler scanner object for current layer of DAGNode. Same # scanner should be use for a full find & replace cycle. scanner = _PyObjScanner() # Find all first-level nested DAGNode children in args. # Update replacement table and execute the replace. for node in scanner.find_nodes( [ self._bound_args, self._bound_kwargs, self._bound_other_args_to_resolve, ] ): if node not in replace_table: replace_table[node] = fn(node) new_args, new_kwargs, new_other_args_to_resolve = scanner.replace_nodes( replace_table ) scanner.clear() # Return updated copy of self. 
return self._copy( new_args, new_kwargs, self.get_options(), new_other_args_to_resolve ) def apply_recursive(self, fn: "Callable[[DAGNode], T]") -> T: """Apply callable on each node in this DAG in a bottom-up tree walk. Args: fn: Callable that will be applied once to each node in the DAG. It will be applied recursively bottom-up, so nodes can assume the fn has been applied to their args already. Returns: Return type of the fn after application to the tree. """ if not type(fn).__name__ == "_CachingFn": class _CachingFn: def __init__(self, fn): self.cache = {} self.fn = fn self.fn.cache = self.cache self.input_node_uuid = None def __call__(self, node: "DAGNode"): from ray.dag.input_node import InputNode if node._stable_uuid not in self.cache: self.cache[node._stable_uuid] = self.fn(node) if isinstance(node, InputNode): if not self.input_node_uuid: self.input_node_uuid = node._stable_uuid elif self.input_node_uuid != node._stable_uuid: raise AssertionError( "Each DAG should only have one unique InputNode." ) return self.cache[node._stable_uuid] fn = _CachingFn(fn) else: if self._stable_uuid in fn.cache: return fn.cache[self._stable_uuid] return fn( self._apply_and_replace_all_child_nodes( lambda node: node.apply_recursive(fn) ) ) def traverse_and_apply(self, fn: "Callable[[DAGNode], T]"): """ Traverse all nodes in the connected component of the DAG that contains the `self` node, and apply the given function to each node. """ visited = set() queue = [self] cgraph_output_node: Optional[DAGNode] = None while queue: node = queue.pop(0) if node._args_contain_nested_dag_node: self._raise_nested_dag_node_error(node._bound_args) if node not in visited: if node.is_cgraph_output_node: # Validate whether there are multiple nodes that call # `experimental_compile`. if cgraph_output_node is not None: raise ValueError( "The DAG was compiled more than once. 
The following two " "nodes call `experimental_compile`: " f"(1) {cgraph_output_node}, (2) {node}" ) cgraph_output_node = node fn(node) visited.add(node) """ Add all unseen downstream and upstream nodes to the queue. This function should be called by the root of the DAG. However, in some invalid cases, some nodes may not be descendants of the root. Therefore, we also add upstream nodes to the queue so that a meaningful error message can be raised when the DAG is compiled. ``` with InputNode() as inp: dag = MultiOutputNode([a1.inc.bind(inp), a2.inc.bind(1)]) ``` In the above example, `a2.inc` is not a descendant of inp. If we only add downstream nodes to the queue, the `a2.inc` node will not be visited , and the error message will be hard to understand, such as a key error in the compiled DAG. """ for neighbor in chain.from_iterable( [node._downstream_nodes, node._upstream_nodes] ): if neighbor not in visited: queue.append(neighbor) def _raise_nested_dag_node_error(self, args): """ Raise an error for nested DAGNodes in Ray Compiled Graphs. Args: args: The arguments of the DAGNode. """ for arg in args: if isinstance(arg, DAGNode): continue else: scanner = _PyObjScanner() dag_nodes = scanner.find_nodes([arg]) scanner.clear() if len(dag_nodes) > 0: raise ValueError( f"Found {len(dag_nodes)} DAGNodes from the arg {arg} " f"in {self}. Please ensure that the argument is a " "single DAGNode and that a DAGNode is not allowed to " "be placed inside any type of container." ) raise AssertionError( "A DAGNode's args should contain nested DAGNodes as args, " "but none were found during the compilation process. This is a " "Ray internal error. Please report this issue to the Ray team." ) def _find_root(self) -> "DAGNode": """ Return the root node of the DAG. The root node must be an InputNode. 
""" from ray.dag.input_node import InputNode node = self while not isinstance(node, InputNode): if len(node._upstream_nodes) == 0: raise ValueError( "No InputNode found in the DAG: when traversing upwards, " f"no upstream node was found for {node}." ) node = node._upstream_nodes[0] return node def apply_functional( self, source_input_list: Any, predicate_fn: Callable, apply_fn: Callable, ): """ Apply a given function to DAGNodes in source_input_list, and return the replaced inputs without mutating or coping any DAGNode. Args: source_input_list: Source inputs to extract and apply function on all children DAGNode instances. predicate_fn: Applied on each DAGNode instance found and determine if we should apply function to it. Can be used to filter node types. apply_fn: Function to apply on the node on bound attributes. Example:: apply_fn = lambda node: node._get_serve_deployment_handle( node._deployment, node._bound_other_args_to_resolve ) Returns: replaced_inputs: Outputs of apply_fn on DAGNodes in source_input_list that passes predicate_fn. 
""" replace_table = {} scanner = _PyObjScanner() for node in scanner.find_nodes(source_input_list): if predicate_fn(node) and node not in replace_table: replace_table[node] = apply_fn(node) replaced_inputs = scanner.replace_nodes(replace_table) scanner.clear() return replaced_inputs def _execute_impl( self, *args, **kwargs ) -> Union[ray.ObjectRef, "ray.actor.ActorHandle"]: """Execute this node, assuming args have been transformed already.""" raise NotImplementedError def _copy_impl( self, new_args: List[Any], new_kwargs: Dict[str, Any], new_options: Dict[str, Any], new_other_args_to_resolve: Dict[str, Any], ) -> "DAGNode": """Return a copy of this node with the given new args.""" raise NotImplementedError def _copy( self, new_args: List[Any], new_kwargs: Dict[str, Any], new_options: Dict[str, Any], new_other_args_to_resolve: Dict[str, Any], ) -> "DAGNode": """Return a copy of this node with the given new args.""" instance = self._copy_impl( new_args, new_kwargs, new_options, new_other_args_to_resolve ) instance._stable_uuid = self._stable_uuid instance._type_hint = copy.deepcopy(self._type_hint) instance._original_type_hint = copy.deepcopy(self._original_type_hint) return instance def __getstate__(self): """Required due to overriding `__getattr__` else pickling fails.""" return self.__dict__ def __setstate__(self, d: Dict[str, Any]): """Required due to overriding `__getattr__` else pickling fails.""" self.__dict__.update(d) def __getattr__(self, attr: str): if attr == "bind": raise AttributeError(f".bind() cannot be used again on {type(self)} ") elif attr == "remote": raise AttributeError( f".remote() cannot be used on {type(self)}. To execute the task " "graph for this node, use .execute()." ) else: return self.__getattribute__(attr)
DAGNode
python
pytorch__pytorch
torch/testing/_internal/common_quantization.py
{ "start": 62739, "end": 63077 }
class ____(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) def forward(self, x): x = self.conv(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 3, 5, 5),)
ConvModel
python
crytic__slither
slither/tools/upgradeability/checks/abstract_checks.py
{ "start": 369, "end": 983 }
class ____(ComparableEnum): HIGH = 0 MEDIUM = 1 LOW = 2 INFORMATIONAL = 3 UNIMPLEMENTED = 999 classification_colors: Dict[CheckClassification, Callable[[str], str]] = { CheckClassification.INFORMATIONAL: green, CheckClassification.LOW: yellow, CheckClassification.MEDIUM: yellow, CheckClassification.HIGH: red, } classification_txt = { CheckClassification.INFORMATIONAL: "Informational", CheckClassification.LOW: "Low", CheckClassification.MEDIUM: "Medium", CheckClassification.HIGH: "High", } CHECK_INFO = List[Union[str, SupportedOutput]]
CheckClassification
python
dagster-io__dagster
python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/scaffold/branch/ai.py
{ "start": 884, "end": 2305 }
class ____: branch_name: str pr_title: str def load_prompt_template(prompt_filename: str, context: str) -> str: """Load a prompt template and inject context. Args: prompt_filename: The name of the prompt file (e.g., 'branch_name_only.md') context: The context to inject into the prompt Returns: The formatted prompt string """ prompt_path = Path(__file__).parent / "prompts" / prompt_filename template = prompt_path.read_text() return template.format(context=context) def load_scaffolding_prompt(plan: str) -> str: """Load the scaffolding prompt template and append user input. Args: plan: The output of the planning phase Returns: The full scaffolding prompt """ prompts_dir = Path(__file__).parent / "prompts" best_practices = (prompts_dir / "best_practices.md").read_text() return plan + "\n\n" + best_practices def get_allowed_commands_scaffolding() -> list[str]: """Get the list of allowed commands for scaffolding operations.""" return ALLOWED_COMMANDS_SCAFFOLDING.copy() def get_allowed_commands_planning() -> list[str]: """Get the list of allowed commands for planning operations. Planning operations need to analyze the codebase but should not make any modifications. This returns a read-only subset of tools. """ return ALLOWED_COMMANDS_PLANNING.copy()
ExtractedNames
python
huggingface__transformers
src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py
{ "start": 4569, "end": 14376 }
class ____(BaseImageProcessor): r""" Constructs a LayoutLMv2 image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to `(size["height"], size["width"])`. Can be overridden by `do_resize` in `preprocess`. size (`dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`): Size of the image after resizing. Can be overridden by `size` in `preprocess`. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. apply_ocr (`bool`, *optional*, defaults to `True`): Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. Can be overridden by `apply_ocr` in `preprocess`. ocr_lang (`str`, *optional*): The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is used. Can be overridden by `ocr_lang` in `preprocess`. tesseract_config (`str`, *optional*, defaults to `""`): Any additional custom configuration flags that are forwarded to the `config` parameter when calling Tesseract. For example: '--psm 6'. Can be overridden by `tesseract_config` in `preprocess`. 
""" model_input_names = ["pixel_values"] valid_kwargs = LayoutLMv2ImageProcessorKwargs def __init__( self, do_resize: bool = True, size: Optional[dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 224, "width": 224} size = get_size_dict(size) self.do_resize = do_resize self.size = size self.resample = resample self.apply_ocr = apply_ocr self.ocr_lang = ocr_lang self.tesseract_config = tesseract_config # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize def resize( self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. 
If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: Optional[PILImageResampling] = None, apply_ocr: Optional[bool] = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Desired size of the output image after resizing. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PIL.Image` resampling filter. Only has an effect if `do_resize` is set to `True`. apply_ocr (`bool`, *optional*, defaults to `self.apply_ocr`): Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. 
ocr_lang (`str`, *optional*, defaults to `self.ocr_lang`): The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is used. tesseract_config (`str`, *optional*, defaults to `self.tesseract_config`): Any additional custom configuration flags that are forwarded to the `config` parameter when calling Tesseract. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size) resample = resample if resample is not None else self.resample apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor") validate_preprocess_arguments( do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) if apply_ocr: requires_backends(self, "pytesseract") words_batch = [] boxes_batch = [] for image in images: words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format) words_batch.append(words) boxes_batch.append(boxes) if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] # flip color channels from RGB to BGR (as Detectron2 requires this) images = [flip_channel_order(image, input_data_format=input_data_format) for image in images] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) if apply_ocr: data["words"] = words_batch data["boxes"] = boxes_batch return data __all__ = ["LayoutLMv2ImageProcessor"]
LayoutLMv2ImageProcessor
python
pytorch__pytorch
torch/utils/data/dataloader.py
{ "start": 34792, "end": 36106 }
class ____(_BaseDataLoaderIter): def __init__(self, loader) -> None: super().__init__(loader) if self._timeout != 0: raise AssertionError("_SingleProcessDataLoaderIter requires timeout == 0") if self._num_workers != 0: raise AssertionError( "_SingleProcessDataLoaderIter requires num_workers == 0" ) # Adds forward compatibilities so classic DataLoader can work with DataPipes: # Taking care of distributed sharding if isinstance(self._dataset, (IterDataPipe, MapDataPipe)): # For BC, use default SHARDING_PRIORITIES torch.utils.data.graph_settings.apply_sharding( self._dataset, self._world_size, self._rank ) self._dataset_fetcher = _DatasetKind.create_fetcher( self._dataset_kind, self._dataset, self._auto_collation, self._collate_fn, self._drop_last, ) def _next_data(self): index = self._next_index() # may raise StopIteration data = self._dataset_fetcher.fetch(index) # may raise StopIteration if self._pin_memory: data = _utils.pin_memory.pin_memory(data, self._pin_memory_device) return data
_SingleProcessDataLoaderIter
python
modin-project__modin
modin/pandas/resample.py
{ "start": 1373, "end": 13126 }
class ____(ClassLogger): _dataframe: Union[DataFrame, Series] _query_compiler: BaseQueryCompiler def __init__( self, dataframe: Union[DataFrame, Series], rule, axis=0, closed=None, label=None, convention="start", kind=None, on=None, level=None, origin="start_day", offset=None, group_keys=lib.no_default, ): self._dataframe = dataframe self._query_compiler = dataframe._query_compiler self.axis = self._dataframe._get_axis_number(axis) self.resample_kwargs = { "rule": rule, "axis": axis, "closed": closed, "label": label, "convention": convention, "kind": kind, "on": on, "level": level, "origin": origin, "offset": offset, "group_keys": group_keys, } self.__groups = self._get_groups() def _get_groups(self): """ Compute the resampled groups. Returns ------- PandasGroupby Groups as specified by resampling arguments. """ df = self._dataframe if self.axis == 0 else self._dataframe.T convention = self.resample_kwargs["convention"] groups = df.groupby( pandas.Grouper( key=self.resample_kwargs["on"], freq=self.resample_kwargs["rule"], closed=self.resample_kwargs["closed"], label=self.resample_kwargs["label"], convention=convention if convention is not lib.no_default else "start", level=self.resample_kwargs["level"], origin=self.resample_kwargs["origin"], offset=self.resample_kwargs["offset"], ), group_keys=self.resample_kwargs["group_keys"], ) return groups def __getitem__(self, key): """ Get ``Resampler`` based on `key` columns of original dataframe. Parameters ---------- key : str or list String or list of selections. Returns ------- modin.pandas.BasePandasDataset New ``Resampler`` based on `key` columns subset of the original dataframe. 
""" def _get_new_resampler(key): subset = self._dataframe[key] resampler = type(self)(subset, **self.resample_kwargs) return resampler from .series import Series if isinstance( key, (list, tuple, Series, pandas.Series, pandas.Index, np.ndarray) ): if len(self._dataframe.columns.intersection(key)) != len(set(key)): missed_keys = list(set(key).difference(self._dataframe.columns)) raise KeyError(f"Columns not found: {str(sorted(missed_keys))[1:-1]}") return _get_new_resampler(list(key)) if key not in self._dataframe: raise KeyError(f"Column not found: {key}") return _get_new_resampler(key) @property def groups(self): return self._query_compiler.default_to_pandas( lambda df: pandas.DataFrame.resample(df, **self.resample_kwargs).groups ) @property def indices(self): return self._query_compiler.default_to_pandas( lambda df: pandas.DataFrame.resample(df, **self.resample_kwargs).indices ) def get_group(self, name, obj=None): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_get_group( self.resample_kwargs, name, obj ) ) def apply(self, func, *args, **kwargs): func = cast_function_modin2pandas(func) from .dataframe import DataFrame if isinstance(self._dataframe, DataFrame): query_comp_op = self._query_compiler.resample_app_df else: query_comp_op = self._query_compiler.resample_app_ser dataframe = DataFrame( query_compiler=query_comp_op( self.resample_kwargs, func, *args, **kwargs, ) ) if is_list_like(func) or isinstance(self._dataframe, DataFrame): return dataframe else: if len(dataframe.index) == 1: return dataframe.iloc[0] else: return dataframe.squeeze() def aggregate(self, func, *args, **kwargs): from .dataframe import DataFrame if isinstance(self._dataframe, DataFrame): query_comp_op = self._query_compiler.resample_agg_df else: query_comp_op = self._query_compiler.resample_agg_ser dataframe = DataFrame( query_compiler=query_comp_op( self.resample_kwargs, func, *args, **kwargs, ) ) if is_list_like(func) or isinstance(self._dataframe, 
DataFrame): return dataframe else: if len(dataframe.index) == 1: return dataframe.iloc[0] else: return dataframe.squeeze() def transform(self, arg, *args, **kwargs): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_transform( self.resample_kwargs, arg, *args, **kwargs ) ) def pipe(self, func, *args, **kwargs): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_pipe( self.resample_kwargs, func, *args, **kwargs ) ) def ffill(self, limit=None): return self.fillna(method="ffill", limit=limit) def bfill(self, limit=None): return self.fillna(method="bfill", limit=limit) def nearest(self, limit=None): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_nearest( self.resample_kwargs, limit ) ) def fillna(self, method, limit=None): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_fillna( self.resample_kwargs, method, limit ) ) def asfreq(self, fill_value=None): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_asfreq( self.resample_kwargs, fill_value ) ) def interpolate( self, method="linear", *, axis=0, limit=None, inplace=False, limit_direction: Optional[str] = None, limit_area=None, downcast=lib.no_default, **kwargs, ): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_interpolate( self.resample_kwargs, method, axis=axis, limit=limit, inplace=inplace, limit_direction=limit_direction, limit_area=limit_area, downcast=downcast, **kwargs, ) ) def count(self): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_count(self.resample_kwargs) ) def nunique(self, *args, **kwargs): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_nunique( self.resample_kwargs, *args, **kwargs ) ) def first(self, *args, **kwargs): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_first( 
self.resample_kwargs, *args, **kwargs, ) ) def last(self, *args, **kwargs): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_last( self.resample_kwargs, *args, **kwargs, ) ) def max(self, *args, **kwargs): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_max( self.resample_kwargs, *args, **kwargs, ) ) def mean(self, *args, **kwargs): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_mean( self.resample_kwargs, *args, **kwargs, ) ) def median(self, *args, **kwargs): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_median( self.resample_kwargs, *args, **kwargs, ) ) def min(self, *args, **kwargs): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_min( self.resample_kwargs, *args, **kwargs, ) ) def ohlc(self, *args, **kwargs): from .dataframe import DataFrame if isinstance(self._dataframe, DataFrame): return DataFrame( query_compiler=self._query_compiler.resample_ohlc_df( self.resample_kwargs, *args, **kwargs, ) ) else: return DataFrame( query_compiler=self._query_compiler.resample_ohlc_ser( self.resample_kwargs, *args, **kwargs, ) ) def prod(self, min_count=0, *args, **kwargs): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_prod( self.resample_kwargs, min_count=min_count, *args, **kwargs ) ) def size(self): from .series import Series output_series = Series( query_compiler=self._query_compiler.resample_size(self.resample_kwargs) ) if not isinstance(self._dataframe, Series): # If input is a DataFrame, rename output Series to None return output_series.rename(None) return output_series def sem(self, *args, **kwargs): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_sem( self.resample_kwargs, *args, **kwargs, ) ) def std(self, ddof=1, *args, **kwargs): return self._dataframe.__constructor__( 
query_compiler=self._query_compiler.resample_std( self.resample_kwargs, *args, ddof=ddof, **kwargs ) ) def sum(self, min_count=0, *args, **kwargs): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_sum( self.resample_kwargs, min_count=min_count, *args, **kwargs ) ) def var(self, ddof=1, *args, **kwargs): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_var( self.resample_kwargs, *args, ddof=ddof, **kwargs ) ) def quantile(self, q=0.5, **kwargs): return self._dataframe.__constructor__( query_compiler=self._query_compiler.resample_quantile( self.resample_kwargs, q, **kwargs ) )
Resampler
python
huggingface__transformers
src/transformers/generation/candidate_generator.py
{ "start": 3334, "end": 17462 }
class ____(CandidateGenerator): """ `CandidateGenerator` class to be used for assisted generation and speculative decoding. This class generates candidates through the use of a smaller model. Read the following blog post for more information: https://huggingface.co/blog/assisted-generation Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) assistant_model (`PreTrainedModel`): The model to be used for generating candidates. This model should be smaller than the main model. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. logits_processor (`LogitsProcessorList`): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. model_kwargs (`Dict`): The keyword arguments that will be passed to the main model, and are used as base inputs for the assistant model as well. inputs_tensor (`torch.Tensor`, *optional*): The model input tensor. In encoder-decoder models, this is the encoder input. 
""" def __init__( self, input_ids: torch.LongTensor, assistant_model: "PreTrainedModel", generation_config: "GenerationConfig", model_kwargs: dict, inputs_tensor: torch.Tensor | None = None, logits_processor: Optional["LogitsProcessorList"] = None, ): # Make sure all data at the same device as assistant model device = assistant_model.device input_ids = input_ids.to(device) if inputs_tensor is not None: inputs_tensor = inputs_tensor.to(device) # Prepare the assistant and the starting number of candidate tokens self.assistant_model = assistant_model self.num_assistant_tokens = assistant_model.generation_config.num_assistant_tokens self.assistant_confidence_threshold = assistant_model.generation_config.assistant_confidence_threshold # Set eos in assistant same as in target model self.assistant_model.generation_config.eos_token_id = generation_config.eos_token_id # Prepare the kwargs for the assistant model assistant_kwargs = {} for key, value in model_kwargs.items(): # deepcopy crashes if we attempt to copy encoder outputs with grads if key not in ("encoder_outputs", "past_key_values"): assistant_kwargs[key] = ( value.detach().to(device) if isinstance(value, torch.Tensor) else copy.deepcopy(value) ) # Remove potential default "logits_to_keep" key if "logits_to_keep" in assistant_kwargs and not assistant_model._supports_logits_to_keep(): del assistant_kwargs["logits_to_keep"] # If the assistant is an encoder-decoder model, assume the encoder is different on the assistant. 
if assistant_model.config.is_encoder_decoder: inputs_tensor, model_input_name, assistant_kwargs = assistant_model._prepare_model_inputs( inputs_tensor, assistant_model.generation_config.bos_token_id, assistant_kwargs ) assistant_kwargs = assistant_model._prepare_encoder_decoder_kwargs_for_generation( inputs_tensor, assistant_kwargs, model_input_name, assistant_model.generation_config ) elif "encoder_outputs" in model_kwargs: assistant_kwargs["encoder_outputs"] = model_kwargs["encoder_outputs"] self.assistant_kwargs = assistant_kwargs # Prepare assistant model's keys of inputs if assistant_model.config.is_encoder_decoder: # both are encoder-decoder self.input_ids_key = "decoder_input_ids" elif "encoder_outputs" in assistant_kwargs: # special case for encoder-decoder with decoder-only assistant (like DistilWhisper) self.input_ids_key = "input_ids" self.assistant_kwargs["attention_mask"] = self.assistant_kwargs.get( "decoder_attention_mask", torch.ones((input_ids.shape[0], 1), device=input_ids.device, dtype=torch.long), ) else: # both are decoder-only self.input_ids_key = "input_ids" # Prepare generation-related options. self.logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() self.generation_config = copy.deepcopy(generation_config) self.generation_config.return_dict_in_generate = True self.generation_config.output_scores = True self.generation_config.assistant_confidence_threshold = self.assistant_confidence_threshold # this flag allow us set the confidence stopping criteria for assistant model generation. 
self.generation_config.is_assistant = True # avoid unnecessary warnings that min_length is larger than max_new_tokens # remove the `MinLengthLogitsProcessor` if exists (NOTE: no need to check for `MinNewTokensLogitsProcessor`) self.main_model_min_length = self.generation_config.min_length self.generation_config.min_length = 0 self.generation_config.min_new_tokens = None for processor in self.logits_processor: if isinstance(processor, MinLengthLogitsProcessor): raise ValueError( "Passing `MinLengthLogitsProcessor` when using `assisted_generation is disabled. " "Please pass in `min_length` into `.generate()` instead" ) # We need to roll back the cache in assisted generation, only DynamicCache is supported self.generation_config.cache_implementation = "dynamic_full" if ( is_sklearn_available() and self.assistant_model.generation_config.assistant_confidence_threshold and type(self) is AssistedCandidateGenerator ): self.probs = [] self.matches = [] def get_candidates(self, input_ids: torch.LongTensor) -> tuple[torch.LongTensor, torch.FloatTensor | None]: """ Fetches the candidates to be tried for the current input. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) Return: `torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be assessed by the model and a `torch.FloatTensor` of shape `(batch_size, candidate_length, vocabulary_size)` containing the logits associated to each candidate. 
""" input_ids = input_ids.to(self.assistant_model.device) # Calculate new tokens to generate min_new_tokens, max_new_tokens = self._calculate_new_tokens(input_ids) if max_new_tokens == 0: return input_ids, None # Update past key values and masks self._update_past_and_masks(input_ids) # Generate candidates generation_args = self._prepare_generation_args(input_ids, min_new_tokens, max_new_tokens) candidate_ids, candidate_logits = self._generate_candidates(generation_args) return candidate_ids, candidate_logits def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int): """ Updates the candidate generation strategy based on the outcomes. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search num_matches (`int`): The number of matches between the candidate sequences and the model predictions. """ # Adjust the max number of assistant tokens to use in the next iteration. This is a simple heuristic, # probably can be improved -- we want to balance the benefits of getting assistant tokens correct with the # cost of forecasting incorrect assistant tokens. if self.assistant_model.generation_config.num_assistant_tokens_schedule in { "heuristic", "heuristic_transient", }: # len(scores[0])-1 is the number of candidates according to the target tokenizer. 
if num_matches == len(scores[0]) - 1: self.num_assistant_tokens += 2.0 else: self.num_assistant_tokens = max(1.0, self.num_assistant_tokens - 1.0) # The assistant's confidence threshold is adjusted throughout the speculative iterations to reduce the number of unnecessary draft and target forward passes. The costs are estimated based on the ROC curve, which considers the probability of the draft token and its match with the target. A cost of 25% is assigned to false positives and 75% to false negatives. # This adaptation is not compatible with UAG, as it relies on the number of matched tokens based on the draft vocabulary, which is unavailable in UAG. if ( is_sklearn_available() and self.assistant_model.generation_config.assistant_confidence_threshold and type(self) is AssistedCandidateGenerator ): # update self.matches self.matches.extend([1] * num_matches) if len(self.probs) > len(self.matches): self.matches.append(0) # update self.probs excess_length = len(self.probs) - len(self.matches) if excess_length > 0: del self.probs[-excess_length:] if ( len(self.probs) > 5 and {0, 1}.issubset(self.matches) ): # require at least 5 samples to calculate the ROC curve and at least one positive and one negative sample fpr, tpr, thresholds = roc_curve(self.matches, self.probs) fnr = 1 - tpr # Calculate the cost for each threshold costs = fpr + 3 * fnr # Find the threshold that minimizes the cost optimal_threshold_index = np.argmin(costs) best_threshold = thresholds[optimal_threshold_index] self.assistant_model.generation_config.assistant_confidence_threshold = best_threshold def _calculate_new_tokens(self, input_ids: torch.LongTensor) -> tuple[int, int]: """Calculate the minimum and maximum number of new tokens to generate.""" new_cur_len = input_ids.shape[-1] max_new_tokens = min(int(self.num_assistant_tokens), self.generation_config.max_length - new_cur_len - 1) min_new_tokens = max(min(max_new_tokens, self.main_model_min_length - new_cur_len), 0) return min_new_tokens, 
max_new_tokens def _update_past_and_masks( self, input_ids: torch.LongTensor, remove_from_pkv: int = 0, num_added_tokens: int = 1 ) -> bool: """Update past key values and attention masks for subsequent generation rounds.""" has_past_key_values = self.assistant_kwargs.get("past_key_values", None) is not None if has_past_key_values: new_cache_size = input_ids.shape[-1] - 1 - remove_from_pkv self.assistant_kwargs["past_key_values"].crop(new_cache_size - num_added_tokens) self.assistant_kwargs = _prepare_attention_mask( self.assistant_kwargs, input_ids.shape[-1], self.assistant_model.config.is_encoder_decoder ) self.assistant_kwargs = _prepare_token_type_ids(self.assistant_kwargs, input_ids.shape[-1]) # This unsets `dynamic_full`, needed to initialize a new cache for the assistant. After the first forward # pass on each generation, we reuse the cache instead. self.generation_config.cache_implementation = None return has_past_key_values def _prepare_generation_args(self, input_ids: torch.LongTensor, min_new_tokens: int, max_new_tokens: int) -> dict: """Prepare arguments for the generation call.""" return { self.input_ids_key: input_ids, "min_new_tokens": min_new_tokens, "max_new_tokens": max_new_tokens, "generation_config": self.generation_config, "logits_processor": self.logits_processor, } def _generate_candidates(self, generation_args: dict) -> tuple[torch.LongTensor, torch.FloatTensor | None]: """Generate candidate sequences using the assistant model.""" assistant_output = self.assistant_model.generate(**generation_args, **self.assistant_kwargs) self.assistant_kwargs["past_key_values"] = assistant_output.past_key_values if ( is_sklearn_available() and self.assistant_model.generation_config.assistant_confidence_threshold and type(self) is AssistedCandidateGenerator ): scores_tensor = torch.cat(assistant_output.scores, dim=0) scores_softmax = torch.softmax(scores_tensor, dim=-1) ids = assistant_output.sequences[-1, -len(assistant_output.scores) :] p = 
scores_softmax[range(len(ids)), ids] self.probs.extend(p.tolist()) candidate_logits = torch.stack(assistant_output.scores, dim=1) candidate_ids = assistant_output.sequences return candidate_ids, candidate_logits
AssistedCandidateGenerator
python
pytorch__pytorch
torch/_higher_order_ops/invoke_subgraph.py
{ "start": 1443, "end": 1632 }
class ____: num_fw_outs: Optional[int] = None indexes_with_symint: set[int] = field(default_factory=set) indexes_with_no_grad: set[int] = field(default_factory=set)
OutputMetadata
python
pytorch__pytorch
tools/experimental/torchfuzz/operators/nn_functional.py
{ "start": 19066, "end": 20507 }
class ____(Operator): """Operator for torch.nn.functional.gelu (Gaussian Error Linear Unit).""" def __init__(self): super().__init__("torch.nn.functional.gelu") @property def torch_op_name(self) -> str | None: """Return the torch operation name.""" return "torch.nn.functional.gelu" def can_produce(self, output_spec: Spec) -> bool: """GELU can produce tensor outputs with floating point dtypes.""" if not isinstance(output_spec, TensorSpec): return False return is_float_dtype(output_spec.dtype) def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]: """Generate input specs for GELU operation. GELU is element-wise, so input shape matches output shape. """ if not isinstance(output_spec, TensorSpec): raise ValueError("GELUOperator can only produce TensorSpec outputs") input_spec = TensorSpec( size=output_spec.size, stride=output_spec.stride, dtype=output_spec.dtype ) return [input_spec] def codegen( self, output_name: str, input_names: list[str], output_spec: Spec ) -> str: """Generate code for GELU operation.""" if len(input_names) != 1: raise ValueError("GELU requires exactly 1 input") input_name = input_names[0] return f"{output_name} = torch.nn.functional.gelu({input_name})"
GELUOperator
python
matplotlib__matplotlib
lib/matplotlib/backends/qt_editor/_formlayout.py
{ "start": 1770, "end": 3157 }
class ____(QtWidgets.QPushButton): """ Color choosing push button """ colorChanged = QtCore.Signal(QtGui.QColor) def __init__(self, parent=None): super().__init__(parent) self.setFixedSize(20, 20) self.setIconSize(QtCore.QSize(12, 12)) self.clicked.connect(self.choose_color) self._color = QtGui.QColor() def choose_color(self): color = QtWidgets.QColorDialog.getColor( self._color, self.parentWidget(), "", QtWidgets.QColorDialog.ColorDialogOption.ShowAlphaChannel) if color.isValid(): self.set_color(color) def get_color(self): return self._color @QtCore.Slot(QtGui.QColor) def set_color(self, color): if color != self._color: self._color = color self.colorChanged.emit(self._color) pixmap = QtGui.QPixmap(self.iconSize()) pixmap.fill(color) self.setIcon(QtGui.QIcon(pixmap)) color = QtCore.Property(QtGui.QColor, get_color, set_color) def to_qcolor(color): """Create a QColor from a matplotlib color""" qcolor = QtGui.QColor() try: rgba = mcolors.to_rgba(color) except ValueError: _api.warn_external(f'Ignoring invalid color {color!r}') return qcolor # return invalid QColor qcolor.setRgbF(*rgba) return qcolor
ColorButton
python
joke2k__faker
faker/providers/currency/uz_UZ/__init__.py
{ "start": 46, "end": 5889 }
class ____(CurrencyProvider): # Format: (code, name) currencies = ( ("AED", "BAA Dirhami"), ("AFN", "Afg‘oni"), ("ALL", "Lek"), ("AMD", "Arman dramasi"), ("ANG", "Niderlandiya Antil guldeni"), ("AOA", "Kvanza"), ("ARS", "Argentina pesosi"), ("AUD", "Avstraliya dollari"), ("AWG", "Aruba florini"), ("AZN", "Ozarbayjon manati"), ("BAM", "Bosniya va Gertsegovina konvertatsiya qilinadigan markasi"), ("BBD", "Barbados dollari"), ("BDT", "Taka"), ("BGN", "Bolgariya levi"), ("BHD", "Bahrayn dinori"), ("BIF", "Burundi franki"), ("BMD", "Bermuda dollari"), ("BND", "Bruney dollari"), ("BOB", "Boliviano"), ("BRL", "Braziliya reali"), ("BSD", "Bagama dollari"), ("BTN", "Ngultrum"), ("BWP", "Pula"), ("BYR", "Belarus rubli"), ("BZD", "Beliz dollari"), ("CAD", "Kanada dollari"), ("CDF", "Kongo franki"), ("CHF", "Shveytsariya franki"), ("CLP", "Chili pesosi"), ("CNY", "Yuan"), ("COP", "Kolumbiya pesosi"), ("CRC", "Kosta-Rika koloni"), ("CUC", "Kuba konvertatsiya qilinadigan pesosi"), ("CUP", "Kuba pesosi"), ("CVE", "Kabo-Verde eskudosi"), ("CZK", "Chex kronasi"), ("DJF", "Jibuti franki"), ("DKK", "Daniya kronasi"), ("DOP", "Dominikan pesosi"), ("DZD", "Jazoir dinori"), ("EGP", "Misr funti"), ("ERN", "Nakfa"), ("ETB", "Efiopiya biri"), ("EUR", "Yevro"), ("FJD", "Fiji dollari"), ("FKP", "Folklend orollari funti"), ("GBP", "Funt sterling"), ("GEL", "Lari"), ("GGP", "Gernsi funti"), ("GHS", "Gana sedi"), ("GIP", "Gibraltar funti"), ("GMD", "Dalasi"), ("GNF", "Gvineya franki"), ("GTQ", "Ketsal"), ("GYD", "Gayana dollari"), ("HKD", "Gonkong dollari"), ("HNL", "Lempira"), ("HRK", "Xorvatiya kunasi"), ("HTG", "Gurda"), ("HUF", "Forint"), ("IDR", "Indoneziya rupiyasi"), ("ILS", "Yangi Isroil shekeli"), ("NIS", "Yangi Isroil shekeli"), ("IMP", "Men oroli funti"), ("INR", "Hind rupiyasi"), ("IQD", "Iroq dinori"), ("IRR", "Eron riali"), ("ISK", "Islandiya kronasi"), ("JEP", "Jersi funti"), ("JMD", "Yamayka dollari"), ("JOD", "Iordaniya dinori"), ("JPY", "Yena"), ("KES", "Keniya shillingi"), 
("KGS", "Som"), ("KHR", "Riyel"), ("KMF", "Komor franki"), ("KPW", "Shimoliy Koreya voni"), ("KRW", "Janubiy Koreya voni"), ("KWD", "Kuvayt dinori"), ("KYD", "Kayman orollari dollari"), ("KZT", "Tenge"), ("LAK", "Kip"), ("LBP", "Livan funti"), ("LKR", "Shri-Lanka rupiyasi"), ("LRD", "Liberiya dollari"), ("LSL", "Loti"), ("LTL", "Litva liti"), ("LYD", "Liviya dinori"), ("MAD", "Marokash dirhami"), ("MDL", "Moldaviya leyi"), ("MGA", "Malagasi ariari"), ("MKD", "Denar"), ("MMK", "Kyat"), ("MNT", "Tugrik"), ("MOP", "Pataka"), ("MRO", "Ugiyya"), ("MUR", "Mavrikiy rupiyasi"), ("MVR", "Rufiya"), ("MWK", "Kvacha"), ("MXN", "Meksika pesosi"), ("MYR", "Malayziya ringgiti"), ("MZN", "Mozambik metikali"), ("NAD", "Namibiya dollari"), ("NGN", "Nayra"), ("NIO", "Kordoba"), ("NOK", "Norvegiya kronasi"), ("NPR", "Nepal rupiyasi"), ("NZD", "Yangi Zelandiya dollari"), ("OMR", "Ummon riali"), ("PAB", "Balboa"), ("PEN", "Sol"), ("PGK", "Kina"), ("PHP", "Filippin pesosi"), ("PKR", "Pokiston rupiyasi"), ("PLN", "Zlotiy"), ("PYG", "Guarani"), ("QAR", "Qatar riali"), ("RON", "Ruminiya leyi"), ("RSD", "Serbiya dinori"), ("RUB", "Rossiya rubli"), ("RWF", "Ruanda franki"), ("SAR", "Saudiya riyoli"), ("SBD", "Solomon orollari dollari"), ("SCR", "Seyshel rupiyasi"), ("SDG", "Sudan funti"), ("SEK", "Shvetsiya kronasi"), ("SGD", "Singapur dollari"), ("SHP", "Muqaddas Yelena funti"), ("SLL", "Leone"), ("SOS", "Somali shillingi"), ("SPL", "Luigino"), ("SRD", "Surinam dollari"), ("STD", "Dobra"), ("SVC", "Salvador koloni"), ("SYP", "Suriya funti"), ("SZL", "Lilangeni"), ("THB", "Bat"), ("TJS", "Somoniy"), ("TMT", "Yangi Turkman manati"), ("TND", "Tunis dinori"), ("TOP", "Paanga"), ("TRY", "Turk lirasi"), ("TTD", "Trinidad va Tobago dollari"), ("TVD", "Tuvalu dollari"), ("TWD", "Yangi Tayvan dollari"), ("TZS", "Tanzaniya shillingi"), ("UAH", "Grivna"), ("UGX", "Uganda shillingi"), ("USD", "AQSh dollari"), ("UYU", "Urugvay pesosi"), ("UZS", "Oʻzbek so‘mi"), ("VEF", "Suveren bolivar"), ("VND", 
"Dong"), ("VUV", "Vatu"), ("WST", "Tala"), ("XAF", "KFA franki BEAS"), ("XCD", "Sharqiy Karib dollari"), ("XDR", "SDR"), ("XOF", "KFA franki BCEAO"), ("XPF", "KFP franki"), ("YER", "Yaman riali"), ("ZAR", "Rand"), ("ZMW", "Zambiya kvachasi"), ("ZWD", "Zimbabve dollari"), ) price_formats = ["#,##", "%#,##", "%##,##", "% ###,##", "%# ###,##"] def pricetag(self) -> str: return ( self.numerify(self.random_element(self.price_formats)) + "\N{NO-BREAK SPACE}\N{CYRILLIC SMALL LETTER ER}." )
Provider
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_default_row05.py
{ "start": 315, "end": 1077 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("default_row05.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.set_default_row(24, 1) worksheet.write("A1", "Foo") worksheet.write("A10", "Bar") worksheet.write("A20", "Baz") for row in range(1, 8 + 1): worksheet.set_row(row, 24) for row in range(10, 19 + 1): worksheet.set_row(row, 24) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
huggingface__transformers
src/transformers/models/x_clip/modeling_x_clip.py
{ "start": 4250, "end": 8166 }
class ____(nn.Module): def __init__(self, config: XCLIPVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False, ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. 
Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 position_embedding = self.position_embedding.weight.unsqueeze(0) num_positions = position_embedding.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embedding(self.position_ids) class_pos_embed = position_embedding[:, :1] patch_pos_embed = position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size): raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})." 
) target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings # Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->XCLIP
XCLIPVisionEmbeddings
python
scipy__scipy
scipy/signal/tests/test_signaltools.py
{ "start": 105653, "end": 110610 }
class ____: # The decimal precision to be used for comparing results. # This value will be passed as the 'decimal' keyword argument of # assert_array_almost_equal(). # Since correlate may chose to use FFT method which converts # longdoubles to doubles internally don't expect better precision # for longdouble than for double (see gh-9520). def decimal(self, dt, xp): if is_numpy(xp) and dt == np.clongdouble: dt = np.cdouble # emulate np.finfo(dt).precision for complex64 and complex128 prec = {64: 15, 32: 6}[xp.finfo(dt).bits] return int(2 * prec / 3) def _setup_rank1(self, dt, mode, xp): rng = np.random.default_rng(9) a = np.random.randn(10).astype(dt) a += 1j * rng.standard_normal(10).astype(dt) b = np.random.randn(8).astype(dt) b += 1j * rng.standard_normal(8).astype(dt) y_r = (correlate(a.real, b.real, mode=mode) + correlate(a.imag, b.imag, mode=mode)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag, mode=mode) + correlate(a.imag, b.real, mode=mode)) a, b, y_r = xp.asarray(a), xp.asarray(b), xp.asarray(y_r) return a, b, y_r def test_rank1_valid(self, dt_name, xp): a, b, y_r = self._setup_rank1(dt_name, 'valid', xp) dt = getattr(xp, dt_name) y = correlate(a, b, 'valid') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt, xp)) assert y.dtype == dt # See gh-5897 y = correlate(b, a, 'valid') assert_array_almost_equal(y, xp.conj(xp.flip(y_r)), decimal=self.decimal(dt, xp)) assert y.dtype == dt def test_rank1_same(self, dt_name, xp): a, b, y_r = self._setup_rank1(dt_name, 'same', xp) dt = getattr(xp, dt_name) y = correlate(a, b, 'same') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt, xp)) assert y.dtype == dt def test_rank1_full(self, dt_name, xp): a, b, y_r = self._setup_rank1(dt_name, 'full', xp) dt = getattr(xp, dt_name) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt, xp)) assert y.dtype == dt def test_swap_full(self, dt_name, xp): dt = getattr(xp, dt_name) d = xp.asarray([0.+0.j, 1.+1.j, 2.+2.j], dtype=dt) k 
= xp.asarray([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j], dtype=dt) y = correlate(d, k) xp_assert_close( y, xp.asarray([0.+0.j, 10.-2.j, 28.-6.j, 22.-6.j, 16.-6.j, 8.-4.j]), atol=1e-6, check_dtype=False ) def test_swap_same(self, dt_name, xp): d = xp.asarray([0.+0.j, 1.+1.j, 2.+2.j]) k = xp.asarray([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j]) y = correlate(d, k, mode="same") xp_assert_close(y, xp.asarray([10.-2.j, 28.-6.j, 22.-6.j])) @skip_xp_backends("cupy", reason="notimplementederror") def test_rank3(self, dt_name, xp): if is_jax(xp) and SCIPY_DEVICE != "cpu": pytest.xfail(reason="error tolerances exceeded with JAX on gpu") a = np.random.randn(10, 8, 6).astype(dt_name) a += 1j * np.random.randn(10, 8, 6).astype(dt_name) b = np.random.randn(8, 6, 4).astype(dt_name) b += 1j * np.random.randn(8, 6, 4).astype(dt_name) y_r = (correlate(a.real, b.real) + correlate(a.imag, b.imag)).astype(dt_name) y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real)) a, b, y_r = xp.asarray(a), xp.asarray(b), xp.asarray(y_r) dt = getattr(xp, dt_name) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt, xp) - 1) assert y.dtype == dt @skip_xp_backends(np_only=True) # XXX: check 0D/scalars on backends. 
def test_rank0(self, dt_name, xp): a = np.array(np.random.randn()).astype(dt_name) a += 1j * np.array(np.random.randn()).astype(dt_name) b = np.array(np.random.randn()).astype(dt_name) b += 1j * np.array(np.random.randn()).astype(dt_name) dt = getattr(xp, dt_name) y_r = (correlate(a.real, b.real) + correlate(a.imag, b.imag)).astype(dt) y_r += 1j * np.array(-correlate(a.real, b.imag) + correlate(a.imag, b.real)) a, b = xp.asarray(a), xp.asarray(b) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt, xp) - 1) assert y.dtype == dt xp_assert_equal(correlate([1], [2j]), np.asarray(correlate(1, 2j)), check_shape=False) xp_assert_equal(correlate([2j], [3j]), np.asarray(correlate(2j, 3j)), check_shape=False) xp_assert_equal(correlate([3j], [4]), np.asarray(correlate(3j, 4)), check_shape=False)
TestCorrelateComplex
python
apache__airflow
providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/k8s_model.py
{ "start": 1040, "end": 2101 }
class ____(ABC): """ Airflow Kubernetes models are here for backwards compatibility reasons only. Ideally clients should use the kubernetes API and the process of client input -> Airflow k8s models -> k8s models can be avoided. All of these models implement the `attach_to_pod` method so that they integrate with the kubernetes client. """ @abstractmethod def attach_to_pod(self, pod: k8s.V1Pod) -> k8s.V1Pod: """ Attaches to pod. :param pod: A pod to attach this Kubernetes object to :return: The pod with the object attached """ def append_to_pod(pod: k8s.V1Pod, k8s_objects: list[K8SModel] | None): """ Attach additional specs to an existing pod object. :param pod: A pod to attach a list of Kubernetes objects to :param k8s_objects: a potential None list of K8SModels :return: pod with the objects attached if they exist """ if not k8s_objects: return pod return reduce(lambda p, o: o.attach_to_pod(p), k8s_objects, pod)
K8SModel
python
getsentry__sentry
tests/sentry/issues/endpoints/test_team_groups_old.py
{ "start": 335, "end": 3119 }
class ____(APITestCase): endpoint = "sentry-api-0-team-oldest-issues" def test_simple(self) -> None: project1 = self.create_project(teams=[self.team], slug="foo") project2 = self.create_project(teams=[self.team], slug="bar") group1 = self.create_group( project=project1, first_seen=datetime(2018, 1, 12, 3, 8, 25, tzinfo=UTC), ) group2 = self.create_group( project=project2, first_seen=datetime(2015, 1, 12, 3, 8, 25, tzinfo=UTC), ) resolved_group = self.create_group(project=project2, status=GroupStatus.RESOLVED) GroupAssignee.objects.assign(group1, self.user) GroupAssignee.objects.assign(group2, self.user) GroupAssignee.objects.assign(resolved_group, self.user) other_user = self.create_user() assigned_to_other = self.create_group( project=project2, first_seen=datetime(2015, 1, 12, 3, 8, 25, tzinfo=UTC), ) GroupAssignee.objects.assign(assigned_to_other, other_user) self.create_group( project=project2, first_seen=datetime(2015, 1, 12, 3, 8, 25, tzinfo=UTC), ) # Should be excluded since it hasn't been seen for over 90 days. 
last_seen_too_old_group = self.create_group( project=project1, first_seen=datetime(2018, 1, 12, 3, 8, 25, tzinfo=UTC), last_seen=datetime.now(UTC) - timedelta(days=91), ) GroupAssignee.objects.assign(last_seen_too_old_group, self.user) self.login_as(user=self.user) response = self.get_success_response(self.organization.slug, self.team.slug) assert [group["id"] for group in response.data] == [str(group2.id), str(group1.id)] def test_filter_by_environment(self) -> None: project1 = self.create_project(teams=[self.team], slug="foo") environment = self.create_environment(name="prod", project=project1) self.create_environment(name="dev", project=project1) group1 = self.create_group( project=project1, first_seen=datetime(2018, 1, 12, 3, 8, 25, tzinfo=UTC), ) GroupAssignee.objects.assign(group1, self.user) GroupEnvironment.objects.create(group_id=group1.id, environment_id=environment.id) self.login_as(user=self.user) response = self.get_success_response( self.organization.slug, self.team.slug, environment="prod" ) assert [group["id"] for group in response.data] == [str(group1.id)] response = self.get_success_response( self.organization.slug, self.team.slug, environment="dev" ) assert [group["id"] for group in response.data] == []
TeamGroupsOldTest
python
pypa__pip
src/pip/_internal/exceptions.py
{ "start": 5399, "end": 5484 }
class ____(PipError): """General exception during installation"""
InstallationError
python
mahmoud__boltons
boltons/setutils.py
{ "start": 19552, "end": 33482 }
class ____: """ helper class for complement() that implements the set methods """ __slots__ = ('_included', '_excluded') def __init__(self, included=None, excluded=None): if included is None: assert type(excluded) in (set, frozenset) elif excluded is None: assert type(included) in (set, frozenset) else: raise ValueError('one of included or excluded must be a set') self._included, self._excluded = included, excluded def __repr__(self): if self._included is None: return f'complement({repr(self._excluded)})' return f'complement(complement({repr(self._included)}))' def complemented(self): '''return a complement of the current set''' if type(self._included) is frozenset or type(self._excluded) is frozenset: return _ComplementSet(included=self._excluded, excluded=self._included) return _ComplementSet( included=None if self._excluded is None else set(self._excluded), excluded=None if self._included is None else set(self._included)) __invert__ = complemented def complement(self): '''convert the current set to its complement in-place''' self._included, self._excluded = self._excluded, self._included def __contains__(self, item): if self._included is None: return not item in self._excluded return item in self._included def add(self, item): if self._included is None: if item in self._excluded: self._excluded.remove(item) else: self._included.add(item) def remove(self, item): if self._included is None: self._excluded.add(item) else: self._included.remove(item) def pop(self): if self._included is None: raise NotImplementedError # self.missing.add(random.choice(gc.objects())) return self._included.pop() def intersection(self, other): try: return self & other except NotImplementedError: raise TypeError('argument must be another set or complement(set)') def __and__(self, other): inc, exc = _norm_args_notimplemented(other) if inc is NotImplemented: return NotImplemented if self._included is None: if exc is None: # - + return _ComplementSet(included=inc - self._excluded) else: # - - 
return _ComplementSet(excluded=self._excluded.union(other._excluded)) else: if inc is None: # + - return _ComplementSet(included=exc - self._included) else: # + + return _ComplementSet(included=self._included.intersection(inc)) __rand__ = __and__ def __iand__(self, other): inc, exc = _norm_args_notimplemented(other) if inc is NotImplemented: return NotImplemented if self._included is None: if exc is None: # - + self._excluded = inc - self._excluded # TODO: do this in place? else: # - - self._excluded |= exc else: if inc is None: # + - self._included -= exc self._included, self._excluded = None, self._included else: # + + self._included &= inc return self def union(self, other): try: return self | other except NotImplementedError: raise TypeError('argument must be another set or complement(set)') def __or__(self, other): inc, exc = _norm_args_notimplemented(other) if inc is NotImplemented: return NotImplemented if self._included is None: if exc is None: # - + return _ComplementSet(excluded=self._excluded - inc) else: # - - return _ComplementSet(excluded=self._excluded.intersection(exc)) else: if inc is None: # + - return _ComplementSet(excluded=exc - self._included) else: # + + return _ComplementSet(included=self._included.union(inc)) __ror__ = __or__ def __ior__(self, other): inc, exc = _norm_args_notimplemented(other) if inc is NotImplemented: return NotImplemented if self._included is None: if exc is None: # - + self._excluded -= inc else: # - - self._excluded &= exc else: if inc is None: # + - self._included, self._excluded = None, exc - self._included # TODO: do this in place? 
else: # + + self._included |= inc return self def update(self, items): if type(items) in (set, frozenset): inc, exc = items, None elif type(items) is _ComplementSet: inc, exc = items._included, items._excluded else: inc, exc = frozenset(items), None if self._included is None: if exc is None: # - + self._excluded &= inc else: # - - self._excluded.discard(exc) else: if inc is None: # + - self._included &= exc self._included, self._excluded = None, self._excluded else: # + + self._included.update(inc) def discard(self, items): if type(items) in (set, frozenset): inc, exc = items, None elif type(items) is _ComplementSet: inc, exc = items._included, items._excluded else: inc, exc = frozenset(items), None if self._included is None: if exc is None: # - + self._excluded.update(inc) else: # - - self._included, self._excluded = exc - self._excluded, None else: if inc is None: # + - self._included &= exc else: # + + self._included.discard(inc) def symmetric_difference(self, other): try: return self ^ other except NotImplementedError: raise TypeError('argument must be another set or complement(set)') def __xor__(self, other): inc, exc = _norm_args_notimplemented(other) if inc is NotImplemented: return NotImplemented if inc is NotImplemented: return NotImplemented if self._included is None: if exc is None: # - + return _ComplementSet(excluded=self._excluded - inc) else: # - - return _ComplementSet(included=self._excluded.symmetric_difference(exc)) else: if inc is None: # + - return _ComplementSet(excluded=exc - self._included) else: # + + return _ComplementSet(included=self._included.symmetric_difference(inc)) __rxor__ = __xor__ def symmetric_difference_update(self, other): inc, exc = _norm_args_typeerror(other) if self._included is None: if exc is None: # - + self._excluded |= inc else: # - - self._excluded.symmetric_difference_update(exc) self._included, self._excluded = self._excluded, None else: if inc is None: # + - self._included |= exc self._included, self._excluded = 
None, self._included else: # + + self._included.symmetric_difference_update(inc) def isdisjoint(self, other): inc, exc = _norm_args_typeerror(other) if inc is NotImplemented: return NotImplemented if self._included is None: if exc is None: # - + return inc.issubset(self._excluded) else: # - - return False else: if inc is None: # + - return self._included.issubset(exc) else: # + + return self._included.isdisjoint(inc) def issubset(self, other): '''everything missing from other is also missing from self''' try: return self <= other except NotImplementedError: raise TypeError('argument must be another set or complement(set)') def __le__(self, other): inc, exc = _norm_args_notimplemented(other) if inc is NotImplemented: return NotImplemented if inc is NotImplemented: return NotImplemented if self._included is None: if exc is None: # - + return False else: # - - return self._excluded.issupserset(exc) else: if inc is None: # + - return self._included.isdisjoint(exc) else: # + + return self._included.issubset(inc) def __lt__(self, other): inc, exc = _norm_args_notimplemented(other) if inc is NotImplemented: return NotImplemented if inc is NotImplemented: return NotImplemented if self._included is None: if exc is None: # - + return False else: # - - return self._excluded > exc else: if inc is None: # + - return self._included.isdisjoint(exc) else: # + + return self._included < inc def issuperset(self, other): '''everything missing from self is also missing from super''' try: return self >= other except NotImplementedError: raise TypeError('argument must be another set or complement(set)') def __ge__(self, other): inc, exc = _norm_args_notimplemented(other) if inc is NotImplemented: return NotImplemented if self._included is None: if exc is None: # - + return not self._excluded.intersection(inc) else: # - - return self._excluded.issubset(exc) else: if inc is None: # + - return False else: # + + return self._included.issupserset(inc) def __gt__(self, other): inc, exc = 
_norm_args_notimplemented(other) if inc is NotImplemented: return NotImplemented if self._included is None: if exc is None: # - + return not self._excluded.intersection(inc) else: # - - return self._excluded < exc else: if inc is None: # + - return False else: # + + return self._included > inc def difference(self, other): try: return self - other except NotImplementedError: raise TypeError('argument must be another set or complement(set)') def __sub__(self, other): inc, exc = _norm_args_notimplemented(other) if inc is NotImplemented: return NotImplemented if self._included is None: if exc is None: # - + return _ComplementSet(excluded=self._excluded | inc) else: # - - return _ComplementSet(included=exc - self._excluded) else: if inc is None: # + - return _ComplementSet(included=self._included & exc) else: # + + return _ComplementSet(included=self._included.difference(inc)) def __rsub__(self, other): inc, exc = _norm_args_notimplemented(other) if inc is NotImplemented: return NotImplemented # rsub, so the expression being evaluated is "other - self" if self._included is None: if exc is None: # - + return _ComplementSet(included=inc & self._excluded) else: # - - return _ComplementSet(included=self._excluded - exc) else: if inc is None: # + - return _ComplementSet(excluded=exc | self._included) else: # + + return _ComplementSet(included=inc.difference(self._included)) def difference_update(self, other): try: self -= other except NotImplementedError: raise TypeError('argument must be another set or complement(set)') def __isub__(self, other): inc, exc = _norm_args_notimplemented(other) if inc is NotImplemented: return NotImplemented if self._included is None: if exc is None: # - + self._excluded |= inc else: # - - self._included, self._excluded = exc - self._excluded, None else: if inc is None: # + - self._included &= exc else: # + + self._included.difference_update(inc) return self def __eq__(self, other): return ( type(self) is type(other) and self._included == 
other._included and self._excluded == other._excluded) or ( type(other) in (set, frozenset) and self._included == other) def __hash__(self): return hash(self._included) ^ hash(self._excluded) def __len__(self): if self._included is not None: return len(self._included) raise NotImplementedError('complemented sets have undefined length') def __iter__(self): if self._included is not None: return iter(self._included) raise NotImplementedError('complemented sets have undefined contents') def __bool__(self): if self._included is not None: return bool(self._included) return True
_ComplementSet
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/orm/util.py
{ "start": 20123, "end": 21850 }
class ____(sql_util.ColumnAdapter): """ColumnAdapter subclass which excludes adaptation of entities from non-matching mappers. """ __slots__ = ("role", "mapper", "is_aliased_class", "aliased_insp") is_aliased_class: bool aliased_insp: Optional[AliasedInsp[Any]] def __init__( self, role: _TraceAdaptRole, entity: _InternalEntityType[Any], *, equivalents: Optional[_EquivalentColumnMap] = None, adapt_required: bool = False, allow_label_resolve: bool = True, anonymize_labels: bool = False, selectable: Optional[Selectable] = None, limit_on_entity: bool = True, adapt_on_names: bool = False, adapt_from_selectables: Optional[AbstractSet[FromClause]] = None, ): self.role = role self.mapper = entity.mapper if selectable is None: selectable = entity.selectable if insp_is_aliased_class(entity): self.is_aliased_class = True self.aliased_insp = entity else: self.is_aliased_class = False self.aliased_insp = None super().__init__( selectable, equivalents, adapt_required=adapt_required, allow_label_resolve=allow_label_resolve, anonymize_labels=anonymize_labels, include_fn=self._include_fn if limit_on_entity else None, adapt_on_names=adapt_on_names, adapt_from_selectables=adapt_from_selectables, ) def _include_fn(self, elem): entity = elem._annotations.get("parentmapper", None) return not entity or entity.isa(self.mapper) or self.mapper.isa(entity)
ORMAdapter
python
PyCQA__pylint
tests/functional/c/class_attributes.py
{ "start": 415, "end": 450 }
class ____: _class_prop: int
Base
python
ansible__ansible
test/lib/ansible_test/_internal/commands/coverage/xml.py
{ "start": 5673, "end": 5775 }
class ____(CoverageCombineConfig): """Configuration for the coverage xml command."""
CoverageXmlConfig
python
huggingface__transformers
src/transformers/models/sam_hq/modeling_sam_hq.py
{ "start": 29671, "end": 32924 }
class ____(nn.Module): def __init__(self, config, attention_downsample_rate: int = 2, skip_first_layer_pe: bool = False): """ A transformer block with four layers: (1) self-attention of sparse inputs (2) cross attention of sparse inputs -> dense inputs (3) mlp block on sparse inputs (4) cross attention of dense inputs -> sparse inputs Arguments: config (`SamHQMaskDecoderConfig`): The configuration file used to instantiate the block attention_downsample_rate (*optionalk*, int, defaults to 2): The downsample ratio of the block used to reduce the inner dim of the attention. skip_first_layer_pe (*optional*, bool, defaults to `False`): Whether or not to skip the addition of the query_point_embedding on the first layer. """ super().__init__() self.hidden_size = config.hidden_size self.layer_norm_eps = config.layer_norm_eps self.self_attn = SamHQAttention(config, downsample_rate=1) self.layer_norm1 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps) self.cross_attn_token_to_image = SamHQAttention(config, downsample_rate=attention_downsample_rate) self.layer_norm2 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps) self.mlp = SamHQMLPBlock(config) self.layer_norm3 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps) self.layer_norm4 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps) self.cross_attn_image_to_token = SamHQAttention(config, downsample_rate=attention_downsample_rate) self.skip_first_layer_pe = skip_first_layer_pe def forward( self, queries: Tensor, keys: Tensor, query_point_embedding: Tensor, key_point_embedding: Tensor, attention_similarity: Tensor, **kwargs: Unpack[TransformersKwargs], ): # Self attention block if self.skip_first_layer_pe: queries, _ = self.self_attn(query=queries, key=queries, value=queries) else: query = queries + query_point_embedding attn_out, _ = self.self_attn(query=query, key=query, value=queries) queries = queries + attn_out queries = self.layer_norm1(queries) # Cross attention block, tokens attending to image 
embedding query = queries + query_point_embedding key = keys + key_point_embedding attn_out, _ = self.cross_attn_token_to_image( query=query, key=key, value=keys, attention_similarity=attention_similarity ) queries = queries + attn_out queries = self.layer_norm2(queries) # MLP block mlp_out = self.mlp(queries) queries = queries + mlp_out queries = self.layer_norm3(queries) # Cross attention block, image embedding attending to tokens query = queries + query_point_embedding key = keys + key_point_embedding attn_out, _ = self.cross_attn_image_to_token(query=key, key=query, value=queries) keys = keys + attn_out keys = self.layer_norm4(keys) return queries, keys, attn_out
SamHQTwoWayAttentionBlock
python
getsentry__sentry
src/sentry/migrations/0914_increase_orgmember_user_email_max_length.py
{ "start": 155, "end": 1503 }
class ____(CheckedMigration): # This flag is used to mark that a migration shouldn't be automatically run in production. # This should only be used for operations where it's safe to run the migration after your # code has deployed. So this should not be used for most operations that alter the schema # of a table. # Here are some things that make sense to mark as post deployment: # - Large data migrations. Typically we want these to be run manually so that they can be # monitored and not block the deploy for a long period of time while they run. # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to # run this outside deployments so that we don't block them. Note that while adding an index # is a schema change, it's completely safe to run the operation after the code has deployed. # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment is_post_deployment = False dependencies = [ ("sentry", "0913_split_discover_dataset_dashboards_self_hosted"), ] operations = [ migrations.AlterField( model_name="organizationmember", name="user_email", field=models.CharField(blank=True, max_length=200, null=True), ), ]
Migration
python
Textualize__textual
docs/examples/how-to/center01.py
{ "start": 80, "end": 350 }
class ____(App): """How to center things.""" CSS = """ Screen { align: center middle; } """ def compose(self) -> ComposeResult: yield Static("Hello, World!") if __name__ == "__main__": app = CenterApp() app.run()
CenterApp
python
apache__airflow
providers/fab/src/airflow/providers/fab/www/security/permissions.py
{ "start": 2568, "end": 3698 }
class ____(TypedDict): """Details of a resource (actions and prefix).""" actions: set[str] prefix: str # Keeping DAG_ACTIONS to keep the compatibility with outdated versions of FAB provider DAG_ACTIONS = {ACTION_CAN_READ, ACTION_CAN_EDIT, ACTION_CAN_DELETE} RESOURCE_DETAILS_MAP = { RESOURCE_DAG: ResourceDetails( actions={ACTION_CAN_READ, ACTION_CAN_EDIT, ACTION_CAN_DELETE}, prefix=RESOURCE_DAG_PREFIX ), RESOURCE_DAG_RUN: ResourceDetails( actions={ACTION_CAN_READ, ACTION_CAN_CREATE, ACTION_CAN_DELETE, ACTION_CAN_ACCESS_MENU}, prefix="DAG Run:", ), } PREFIX_LIST = [details["prefix"] for details in RESOURCE_DETAILS_MAP.values()] PREFIX_RESOURCES_MAP = {details["prefix"]: resource for resource, details in RESOURCE_DETAILS_MAP.items()} def resource_name(dag_id: str, resource: str) -> str: """Return the resource name for a DAG id.""" if dag_id in RESOURCE_DETAILS_MAP.keys(): return dag_id if dag_id.startswith(tuple(PREFIX_RESOURCES_MAP.keys())): return dag_id return f"{RESOURCE_DETAILS_MAP[resource]['prefix']}{dag_id}"
ResourceDetails
python
oauthlib__oauthlib
oauthlib/oauth2/rfc6749/endpoints/revocation.py
{ "start": 438, "end": 5212 }
class ____(BaseEndpoint): """Token revocation endpoint. Endpoint used by authenticated clients to revoke access and refresh tokens. Commonly this will be part of the Authorization Endpoint. """ valid_token_types = ('access_token', 'refresh_token') valid_request_methods = ('POST',) def __init__(self, request_validator, supported_token_types=None, enable_jsonp=False): BaseEndpoint.__init__(self) self.request_validator = request_validator self.supported_token_types = ( supported_token_types or self.valid_token_types) self.enable_jsonp = enable_jsonp @catch_errors_and_unavailability def create_revocation_response(self, uri, http_method='POST', body=None, headers=None): """Revoke supplied access or refresh token. The authorization server responds with HTTP status code 200 if the token has been revoked successfully or if the client submitted an invalid token. Note: invalid tokens do not cause an error response since the client cannot handle such an error in a reasonable way. Moreover, the purpose of the revocation request, invalidating the particular token, is already achieved. The content of the response body is ignored by the client as all necessary information is conveyed in the response code. An invalid token type hint value is ignored by the authorization server and does not influence the revocation response. """ resp_headers = { 'Content-Type': 'application/json', 'Cache-Control': 'no-store', 'Pragma': 'no-cache', } request = Request( uri, http_method=http_method, body=body, headers=headers) try: self.validate_revocation_request(request) log.debug('Token revocation valid for %r.', request) except OAuth2Error as e: log.debug('Client error during validation of %r. 
%r.', request, e) response_body = e.json if self.enable_jsonp and request.callback: response_body = '{}({});'.format(request.callback, response_body) resp_headers.update(e.headers) return resp_headers, response_body, e.status_code self.request_validator.revoke_token(request.token, request.token_type_hint, request) response_body = '' if self.enable_jsonp and request.callback: response_body = request.callback + '();' return {}, response_body, 200 def validate_revocation_request(self, request): """Ensure the request is valid. The client constructs the request by including the following parameters using the "application/x-www-form-urlencoded" format in the HTTP request entity-body: token (REQUIRED). The token that the client wants to get revoked. token_type_hint (OPTIONAL). A hint about the type of the token submitted for revocation. Clients MAY pass this parameter in order to help the authorization server to optimize the token lookup. If the server is unable to locate the token using the given hint, it MUST extend its search across all of its supported token types. An authorization server MAY ignore this parameter, particularly if it is able to detect the token type automatically. This specification defines two such values: * access_token: An Access Token as defined in [RFC6749], `section 1.4`_ * refresh_token: A Refresh Token as defined in [RFC6749], `section 1.5`_ Specific implementations, profiles, and extensions of this specification MAY define other values for this parameter using the registry defined in `Section 4.1.2`_. The client also includes its authentication credentials as described in `Section 2.3`_. of [`RFC6749`_]. .. _`section 1.4`: https://tools.ietf.org/html/rfc6749#section-1.4 .. _`section 1.5`: https://tools.ietf.org/html/rfc6749#section-1.5 .. _`section 2.3`: https://tools.ietf.org/html/rfc6749#section-2.3 .. _`Section 4.1.2`: https://tools.ietf.org/html/draft-ietf-oauth-revocation-11#section-4.1.2 .. 
_`RFC6749`: https://tools.ietf.org/html/rfc6749 """ self._raise_on_bad_method(request) self._raise_on_bad_post_request(request) self._raise_on_missing_token(request) self._raise_on_invalid_client(request) self._raise_on_unsupported_token(request)
RevocationEndpoint
python
pypa__packaging
src/packaging/licenses/__init__.py
{ "start": 1938, "end": 5837 }
class ____(ValueError): """Raised when a license-expression string is invalid >>> canonicalize_license_expression("invalid") Traceback (most recent call last): ... packaging.licenses.InvalidLicenseExpression: Invalid license expression: 'invalid' """ def canonicalize_license_expression( raw_license_expression: str, ) -> NormalizedLicenseExpression: if not raw_license_expression: message = f"Invalid license expression: {raw_license_expression!r}" raise InvalidLicenseExpression(message) # Pad any parentheses so tokenization can be achieved by merely splitting on # whitespace. license_expression = raw_license_expression.replace("(", " ( ").replace(")", " ) ") licenseref_prefix = "LicenseRef-" license_refs = { ref.lower(): "LicenseRef-" + ref[len(licenseref_prefix) :] for ref in license_expression.split() if ref.lower().startswith(licenseref_prefix.lower()) } # Normalize to lower case so we can look up licenses/exceptions # and so boolean operators are Python-compatible. license_expression = license_expression.lower() tokens = license_expression.split() # Rather than implementing a parenthesis/boolean logic parser, create an # expression that Python can parse. Everything that is not involved with the # grammar itself is replaced with the placeholder `False` and the resultant # expression should become a valid Python expression. 
python_tokens = [] for token in tokens: if token not in {"or", "and", "with", "(", ")"}: python_tokens.append("False") elif token == "with": python_tokens.append("or") elif ( token == "(" and python_tokens and python_tokens[-1] not in {"or", "and", "("} ) or (token == ")" and python_tokens and python_tokens[-1] == "("): message = f"Invalid license expression: {raw_license_expression!r}" raise InvalidLicenseExpression(message) else: python_tokens.append(token) python_expression = " ".join(python_tokens) try: compile(python_expression, "", "eval") except SyntaxError: message = f"Invalid license expression: {raw_license_expression!r}" raise InvalidLicenseExpression(message) from None # Take a final pass to check for unknown licenses/exceptions. normalized_tokens = [] for token in tokens: if token in {"or", "and", "with", "(", ")"}: normalized_tokens.append(token.upper()) continue if normalized_tokens and normalized_tokens[-1] == "WITH": if token not in EXCEPTIONS: message = f"Unknown license exception: {token!r}" raise InvalidLicenseExpression(message) normalized_tokens.append(EXCEPTIONS[token]["id"]) else: if token.endswith("+"): final_token = token[:-1] suffix = "+" else: final_token = token suffix = "" if final_token.startswith("licenseref-"): if not license_ref_allowed.match(final_token): message = f"Invalid licenseref: {final_token!r}" raise InvalidLicenseExpression(message) normalized_tokens.append(license_refs[final_token] + suffix) else: if final_token not in LICENSES: message = f"Unknown license: {final_token!r}" raise InvalidLicenseExpression(message) normalized_tokens.append(LICENSES[final_token]["id"] + suffix) normalized_expression = " ".join(normalized_tokens) return cast( "NormalizedLicenseExpression", normalized_expression.replace("( ", "(").replace(" )", ")"), )
InvalidLicenseExpression
python
apache__airflow
providers/google/src/airflow/providers/google/marketing_platform/links/analytics_admin.py
{ "start": 1674, "end": 2161 }
class ____(GoogleAnalyticsBaseLink): """Helper class for constructing Google Analytics Property Link.""" name = "Data Analytics Property" key = "data_analytics_property" format_str = "p{property_id}/" @staticmethod def persist( context: Context, property_id: str, ): context["task_instance"].xcom_push( key=GoogleAnalyticsPropertyLink.key, value={"property_id": property_id}, )
GoogleAnalyticsPropertyLink
python
altair-viz__altair
altair/utils/core.py
{ "start": 27211, "end": 31851 }
class ____: channel_to_name: dict[type[SchemaBase], str] name_to_channel: dict[str, dict[_ChannelType, type[SchemaBase]]] @classmethod def from_cache(cls) -> _ChannelCache: global _CHANNEL_CACHE try: cached = _CHANNEL_CACHE except NameError: cached = cls.__new__(cls) cached.channel_to_name = _init_channel_to_name() # pyright: ignore[reportAttributeAccessIssue] cached.name_to_channel = _invert_group_channels(cached.channel_to_name) _CHANNEL_CACHE = cached return _CHANNEL_CACHE def get_encoding(self, tp: type[Any], /) -> str: if encoding := self.channel_to_name.get(tp): return encoding msg = f"positional of type {type(tp).__name__!r}" raise NotImplementedError(msg) def _wrap_in_channel(self, obj: Any, encoding: str, /): if isinstance(obj, SchemaBase): return obj elif isinstance(obj, str): obj = {"shorthand": obj} elif isinstance(obj, (list, tuple)): return [self._wrap_in_channel(el, encoding) for el in obj] elif isinstance(obj, SchemaLike): obj = obj.to_dict() if channel := self.name_to_channel.get(encoding): tp = channel["value" if "value" in obj else "field"] try: # Don't force validation here; some objects won't be valid until # they're created in the context of a chart. return tp.from_dict(obj, validate=False) except jsonschema.ValidationError: # our attempts at finding the correct class have failed return obj else: warnings.warn(f"Unrecognized encoding channel {encoding!r}", stacklevel=1) return obj def infer_encoding_types(self, kwargs: dict[str, Any], /): return { encoding: self._wrap_in_channel(obj, encoding) for encoding, obj in kwargs.items() if obj is not Undefined } def _init_channel_to_name(): """ Construct a dictionary of channel type to encoding name. Note ---- The return type is not expressible using annotations, but is used internally by `mypy`/`pyright` and avoids the need for type ignores. 
Returns ------- mapping: dict[type[`<subclass of FieldChannelMixin and SchemaBase>`] | type[`<subclass of ValueChannelMixin and SchemaBase>`] | type[`<subclass of DatumChannelMixin and SchemaBase>`], str] """ from altair.vegalite.v6.schema import channels as ch mixins = ch.FieldChannelMixin, ch.ValueChannelMixin, ch.DatumChannelMixin return { c: c._encoding_name for c in ch.__dict__.values() if isinstance(c, type) and issubclass(c, mixins) and issubclass(c, SchemaBase) } def _invert_group_channels( m: dict[type[SchemaBase], str], / ) -> dict[str, dict[_ChannelType, type[SchemaBase]]]: """Grouped inverted index for `_ChannelCache.channel_to_name`.""" def _reduce(it: Iterator[tuple[type[Any], str]]) -> Any: """ Returns a 1-2 item dict, per channel. Never includes `datum`, as it is never utilized in `wrap_in_channel`. """ item: dict[Any, type[SchemaBase]] = {} for tp, _ in it: name = tp.__name__ if name.endswith("Datum"): continue elif name.endswith("Value"): sub_key = "value" else: sub_key = "field" item[sub_key] = tp return item grouper = groupby(m.items(), itemgetter(1)) return {k: _reduce(chans) for k, chans in grouper} def infer_encoding_types(args: tuple[Any, ...], kwargs: dict[str, Any]): """ Infer typed keyword arguments for args and kwargs. Parameters ---------- args : Sequence Sequence of function args kwargs : MutableMapping Dict of function kwargs Returns ------- kwargs : dict All args and kwargs in a single dict, with keys and types based on the channels mapping. """ cache = _ChannelCache.from_cache() # First use the mapping to convert args to kwargs based on their types. for arg in args: el = next(iter(arg), None) if isinstance(arg, (list, tuple)) else arg encoding = cache.get_encoding(type(el)) if encoding not in kwargs: kwargs[encoding] = arg else: msg = f"encoding {encoding!r} specified twice." raise ValueError(msg) return cache.infer_encoding_types(kwargs)
_ChannelCache
python
mlflow__mlflow
tests/tracing/test_fluent.py
{ "start": 1793, "end": 2259 }
class ____: @mlflow.trace() async def predict(self, x, y): z = x + y z = await self.add_one(z) z = await mlflow.trace(self.square)(z) return z # noqa: RET504 @mlflow.trace(span_type=SpanType.LLM, name="add_one_with_custom_name", attributes={"delta": 1}) async def add_one(self, z): return z + 1 async def square(self, t): res = t**2 time.sleep(0.1) return res
DefaultAsyncTestModel
python
protocolbuffers__protobuf
python/google/protobuf/descriptor.py
{ "start": 10261, "end": 17951 }
class ____(_NestedDescriptorBase): """Descriptor for a protocol message type. Attributes: name (str): Name of this protocol message type. full_name (str): Fully-qualified name of this protocol message type, which will include protocol "package" name and the name of any enclosing types. containing_type (Descriptor): Reference to the descriptor of the type containing us, or None if this is top-level. fields (list[FieldDescriptor]): Field descriptors for all fields in this type. fields_by_number (dict(int, FieldDescriptor)): Same :class:`FieldDescriptor` objects as in :attr:`fields`, but indexed by "number" attribute in each FieldDescriptor. fields_by_name (dict(str, FieldDescriptor)): Same :class:`FieldDescriptor` objects as in :attr:`fields`, but indexed by "name" attribute in each :class:`FieldDescriptor`. nested_types (list[Descriptor]): Descriptor references for all protocol message types nested within this one. nested_types_by_name (dict(str, Descriptor)): Same Descriptor objects as in :attr:`nested_types`, but indexed by "name" attribute in each Descriptor. enum_types (list[EnumDescriptor]): :class:`EnumDescriptor` references for all enums contained within this type. enum_types_by_name (dict(str, EnumDescriptor)): Same :class:`EnumDescriptor` objects as in :attr:`enum_types`, but indexed by "name" attribute in each EnumDescriptor. enum_values_by_name (dict(str, EnumValueDescriptor)): Dict mapping from enum value name to :class:`EnumValueDescriptor` for that value. extensions (list[FieldDescriptor]): All extensions defined directly within this message type (NOT within a nested type). extensions_by_name (dict(str, FieldDescriptor)): Same FieldDescriptor objects as :attr:`extensions`, but indexed by "name" attribute of each FieldDescriptor. is_extendable (bool): Does this type define any extension ranges? oneofs (list[OneofDescriptor]): The list of descriptors for oneof fields in this message. 
oneofs_by_name (dict(str, OneofDescriptor)): Same objects as in :attr:`oneofs`, but indexed by "name" attribute. file (FileDescriptor): Reference to file descriptor. is_map_entry: If the message type is a map entry. """ if _USE_C_DESCRIPTORS: _C_DESCRIPTOR_CLASS = _message.Descriptor def __new__( cls, name=None, full_name=None, filename=None, containing_type=None, fields=None, nested_types=None, enum_types=None, extensions=None, options=None, serialized_options=None, is_extendable=True, extension_ranges=None, oneofs=None, file=None, # pylint: disable=redefined-builtin serialized_start=None, serialized_end=None, syntax=None, is_map_entry=False, create_key=None, ): _message.Message._CheckCalledFromGeneratedFile() return _message.default_pool.FindMessageTypeByName(full_name) # NOTE: The file argument redefining a builtin is nothing we can # fix right now since we don't know how many clients already rely on the # name of the argument. def __init__( self, name, full_name, filename, containing_type, fields, nested_types, enum_types, extensions, options=None, serialized_options=None, is_extendable=True, extension_ranges=None, oneofs=None, file=None, serialized_start=None, serialized_end=None, # pylint: disable=redefined-builtin syntax=None, is_map_entry=False, create_key=None, ): """Arguments to __init__() are as described in the description of Descriptor fields above. Note that filename is an obsolete argument, that is not used anymore. Please use file.name to access this as an attribute. """ if create_key is not _internal_create_key: _Deprecated('create function Descriptor()') super(Descriptor, self).__init__( options, 'MessageOptions', name, full_name, file, containing_type, serialized_start=serialized_start, serialized_end=serialized_end, serialized_options=serialized_options, ) # We have fields in addition to fields_by_name and fields_by_number, # so that: # 1. Clients can index fields by "order in which they're listed." # 2. 
Clients can easily iterate over all fields with the terse # syntax: for f in descriptor.fields: ... self.fields = fields for field in self.fields: field.containing_type = self field.file = file self.fields_by_number = dict((f.number, f) for f in fields) self.fields_by_name = dict((f.name, f) for f in fields) self._fields_by_camelcase_name = None self.nested_types = nested_types for nested_type in nested_types: nested_type.containing_type = self self.nested_types_by_name = dict((t.name, t) for t in nested_types) self.enum_types = enum_types for enum_type in self.enum_types: enum_type.containing_type = self self.enum_types_by_name = dict((t.name, t) for t in enum_types) self.enum_values_by_name = dict( (v.name, v) for t in enum_types for v in t.values ) self.extensions = extensions for extension in self.extensions: extension.extension_scope = self self.extensions_by_name = dict((f.name, f) for f in extensions) self.is_extendable = is_extendable self.extension_ranges = extension_ranges self.oneofs = oneofs if oneofs is not None else [] self.oneofs_by_name = dict((o.name, o) for o in self.oneofs) for oneof in self.oneofs: oneof.containing_type = self oneof.file = file self._is_map_entry = is_map_entry @property def _parent(self): return self.containing_type or self.file @property def fields_by_camelcase_name(self): """Same FieldDescriptor objects as in :attr:`fields`, but indexed by :attr:`FieldDescriptor.camelcase_name`. """ if self._fields_by_camelcase_name is None: self._fields_by_camelcase_name = dict( (f.camelcase_name, f) for f in self.fields ) return self._fields_by_camelcase_name def EnumValueName(self, enum, value): """Returns the string name of an enum value. This is just a small helper method to simplify a common operation. Args: enum: string name of the Enum. value: int, value of the enum. Returns: string name of the enum value. Raises: KeyError if either the Enum doesn't exist or the value is not a valid value for the enum. 
""" return self.enum_types_by_name[enum].values_by_number[value].name def CopyToProto(self, proto): """Copies this to a descriptor_pb2.DescriptorProto. Args: proto: An empty descriptor_pb2.DescriptorProto. """ # This function is overridden to give a better doc comment. super(Descriptor, self).CopyToProto(proto) # TODO: We should have aggressive checking here, # for example: # * If you specify a repeated field, you should not be allowed # to specify a default value. # * [Other examples here as needed]. # # TODO: for this and other *Descriptor classes, we # might also want to lock things down aggressively (e.g., # prevent clients from setting the attributes). Having # stronger invariants here in general will reduce the number # of runtime checks we must do in reflection.py...
Descriptor
python
plotly__plotly.py
plotly/graph_objs/histogram2d/_stream.py
{ "start": 233, "end": 3531 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "histogram2d" _path_str = "histogram2d.stream" _valid_props = {"maxpoints", "token"} @property def maxpoints(self): """ Sets the maximum number of points to keep on the plots from an incoming stream. If `maxpoints` is set to 50, only the newest 50 points will be displayed on the plot. The 'maxpoints' property is a number and may be specified as: - An int or float in the interval [0, 10000] Returns ------- int|float """ return self["maxpoints"] @maxpoints.setter def maxpoints(self, val): self["maxpoints"] = val @property def token(self): """ The stream id number links a data trace on a plot with a stream. See https://chart-studio.plotly.com/settings for more details. The 'token' property is a string and must be specified as: - A non-empty string Returns ------- str """ return self["token"] @token.setter def token(self, val): self["token"] = val @property def _prop_descriptions(self): return """\ maxpoints Sets the maximum number of points to keep on the plots from an incoming stream. If `maxpoints` is set to 50, only the newest 50 points will be displayed on the plot. token The stream id number links a data trace on a plot with a stream. See https://chart-studio.plotly.com/settings for more details. """ def __init__(self, arg=None, maxpoints=None, token=None, **kwargs): """ Construct a new Stream object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.histogram2d.Stream` maxpoints Sets the maximum number of points to keep on the plots from an incoming stream. If `maxpoints` is set to 50, only the newest 50 points will be displayed on the plot. token The stream id number links a data trace on a plot with a stream. See https://chart-studio.plotly.com/settings for more details. 
Returns ------- Stream """ super().__init__("stream") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.histogram2d.Stream constructor must be a dict or an instance of :class:`plotly.graph_objs.histogram2d.Stream`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("maxpoints", arg, maxpoints) self._set_property("token", arg, token) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Stream
python
astropy__astropy
astropy/cosmology/_src/io/connect.py
{ "start": 9949, "end": 12683 }
class ____(io_registry.UnifiedReadWrite): """Transform this Cosmology to another format. This function provides the Cosmology interface to the astropy unified I/O layer. This allows easily transforming to supported data formats using syntax such as:: >>> from astropy.cosmology import Planck18 >>> Planck18.to_format("mapping") {'cosmology': astropy.cosmology.core.FlatLambdaCDM, 'name': 'Planck18', 'H0': <Quantity 67.66 km / (Mpc s)>, 'Om0': 0.30966, ... Get help on the available representations for ``Cosmology`` using the ``help()`` method:: >>> Cosmology.to_format.help() # Get help and list supported formats >>> Cosmology.to_format.help('<format>') # Get detailed help on format >>> Cosmology.to_format.list_formats() # Print list of available formats Parameters ---------- format : str Format specifier. *args Positional arguments passed through to data writer. If supplied the first argument is the output filename. **kwargs Keyword arguments passed through to data writer. """ def __init__( self, instance: "astropy.cosmology.Cosmology", cls: type["astropy.cosmology.Cosmology"], ) -> None: super().__init__(instance, cls, "write", registry=convert_registry) # =============================================================== # __call__ overloads @overload def __call__( self, format: Literal["astropy.cosmology"], *args: Any, **kwargs: Any ) -> "astropy.cosmology.Cosmology": ... @overload def __call__( self, format: Literal["astropy.model"], *args: Any, **kwargs: Any ) -> "astropy.cosmology._src.io.builtin.model._CosmologyModel": ... @overload def __call__( self, format: Literal["astropy.row"], *args: Any, **kwargs: Any ) -> Row: ... @overload def __call__( self, format: Literal["astropy.table"], *args: Any, **kwargs: Any ) -> Table: ... @overload # specific mapping option, where the mapping class is specified. def __call__( self, format: Literal["mapping"], *args: Any, cls: _MT, **kwargs: Any ) -> _MT: ... 
@overload def __call__( self, format: Literal["mapping"], *args: Any, **kwargs: Any ) -> dict[str, Any]: ... @overload def __call__(self, format: Literal["yaml"], *args: Any, **kwargs: Any) -> str: ... @overload def __call__(self, format: str, *args: Any, **kwargs: Any) -> Any: ... def __call__(self, format: str, *args: Any, **kwargs: Any) -> Any: return self.registry.write(self._instance, None, *args, format=format, **kwargs)
CosmologyToFormat
python
Netflix__metaflow
metaflow/_vendor/click/parser.py
{ "start": 5916, "end": 6075 }
class ____(object): def __init__(self, rargs): self.opts = {} self.largs = [] self.rargs = rargs self.order = []
ParsingState
python
pytorch__pytorch
torch/fx/graph.py
{ "start": 1553, "end": 4553 }
class ____(NamedTuple): """Additional objs that we add to every graph's globals. The repr() for some standard library objects is not valid Python code without an import. For common objects of this sort, we bundle them in the globals of every FX graph. """ # How to import this object from the standard library. import_str: str # The actual object, produced from that import string. obj: Any # Combined dict of disallowed variable names so we can check with one lookup _illegal_names = {k: object() for k in keyword.kwlist} _illegal_names.update(builtins.__dict__) # can't shadow a builtin name _custom_builtins: dict[str, _CustomBuiltin] = {} def _register_custom_builtin(name: str, import_str: str, obj: Any): _custom_builtins[name] = _CustomBuiltin(import_str, obj) _illegal_names[name] = obj _register_custom_builtin("inf", "from math import inf", math.inf) _register_custom_builtin("nan", "from math import nan", math.nan) _register_custom_builtin("NoneType", "NoneType = type(None)", type(None)) _register_custom_builtin("torch", "import torch", torch) _register_custom_builtin("device", "from torch import device", torch.device) _register_custom_builtin("fx_pytree", "import torch.fx._pytree as fx_pytree", fx_pytree) _register_custom_builtin("pytree", "import torch.utils._pytree as pytree", pytree) def _is_magic(x: str) -> bool: return x.startswith("__") and x.endswith("__") def _snake_case(s: str) -> str: """ Transforms the given string ``s`` to a Python-style variable name Examples: ``mod.snake_case`` -> ``mod.snake_case`` ``mod.pascalCase``-> ``mod.pascal_case`` ``mod.ALL_CAPS`` -> ``mod.all_caps`` """ return _snake_case_sub(s).lower() # Replace occurrences where a lowercase letter is followed by an uppercase letter _snake_case_sub = functools.partial(re.compile(r"(?<=[a-z])([A-Z])").sub, r"_\1") # Find chars that can't be in a Python identifier _illegal_char_regex = re.compile("[^0-9a-zA-Z_]+") # Combined check for variable names: # 1) Checks name is not empty # 2) Checks 
first character is not a digit # 3) Checks name has no illegal characters (_illegal_char_regex) # 3) Splits off the number suffix (if present) _name_regex = re.compile(r"^([a-zA-Z_][0-9a-zA-Z_]*?)(?:_(\d+))?$") # starts with torch but does not start with torch._dynamo. or torch._inductor. _torch_but_not_dynamo = re.compile( r"^torch(?:\.(?!_dynamo\.|_inductor\.)[^.]+)*$" ).fullmatch def _is_from_torch(obj: Any) -> bool: module_name = getattr(obj, "__module__", None) if module_name is not None: return _torch_but_not_dynamo(module_name) is not None name = getattr(obj, "__name__", None) # exclude torch because torch.torch.torch.torch works. idk mang if name is not None and name != "torch": for guess in [torch, torch.nn.functional]: if getattr(guess, name, None) is obj: return True return False
_CustomBuiltin
python
kamyu104__LeetCode-Solutions
Python/find-the-maximum-achievable-number.py
{ "start": 38, "end": 218 }
class ____(object): def theMaximumAchievableX(self, num, t): """ :type num: int :type t: int :rtype: int """ return num+2*t
Solution
python
kamyu104__LeetCode-Solutions
Python/inorder-successor-in-bst.py
{ "start": 29, "end": 623 }
class ____(object): def inorderSuccessor(self, root, p): """ :type root: TreeNode :type p: TreeNode :rtype: TreeNode """ # If it has right subtree. if p and p.right: p = p.right while p.left: p = p.left return p # Search from root. successor = None while root and root != p: if root.val > p.val: successor = root root = root.left else: root = root.right return successor
Solution
python
apache__airflow
airflow-ctl/tests/airflow_ctl/api/test_operations.py
{ "start": 36692, "end": 39251 }
class ____: dag_id = "dag_id" dag_run_id = "dag_run_id" dag_run_response = DAGRunResponse( dag_display_name=dag_run_id, dag_run_id=dag_run_id, dag_id=dag_id, logical_date=datetime.datetime(2025, 1, 1, 0, 0, 0), queued_at=datetime.datetime(2025, 1, 1, 0, 0, 0), start_date=datetime.datetime(2025, 1, 1, 0, 0, 0), end_date=datetime.datetime(2025, 1, 1, 0, 0, 0), data_interval_start=datetime.datetime(2025, 1, 1, 0, 0, 0), data_interval_end=datetime.datetime(2025, 1, 1, 0, 0, 0), last_scheduling_decision=datetime.datetime(2025, 1, 1, 0, 0, 0), run_after=datetime.datetime(2025, 1, 1, 0, 0, 0), run_type=DagRunType.MANUAL, state=DagRunState.RUNNING, triggered_by=DagRunTriggeredByType.UI, conf={}, note=None, dag_versions=[ DagVersionResponse( id=uuid.uuid4(), version_number=1, dag_id=dag_id, bundle_name="bundle_name", bundle_version="1", created_at=datetime.datetime(2025, 1, 1, 0, 0, 0), dag_display_name=dag_id, ) ], ) dag_run_collection_response = DAGRunCollectionResponse( dag_runs=[dag_run_response], total_entries=1, ) def test_get(self): def handle_request(request: httpx.Request) -> httpx.Response: assert request.url.path == f"/api/v2/dags/{self.dag_id}/dagRuns/{self.dag_run_id}" return httpx.Response(200, json=json.loads(self.dag_run_response.model_dump_json())) client = make_api_client(transport=httpx.MockTransport(handle_request)) response = client.dag_runs.get(dag_id=self.dag_id, dag_run_id=self.dag_run_id) assert response == self.dag_run_response def test_list(self): def handle_request(request: httpx.Request) -> httpx.Response: assert request.url.path == f"/api/v2/dags/{self.dag_id}/dagRuns" return httpx.Response(200, json=json.loads(self.dag_run_collection_response.model_dump_json())) client = make_api_client(transport=httpx.MockTransport(handle_request)) response = client.dag_runs.list( dag_id=self.dag_id, start_date=datetime.datetime(2025, 1, 1, 0, 0, 0), end_date=datetime.datetime(2025, 1, 1, 0, 0, 0), state=DagRunState.RUNNING, limit=1, ) assert response == 
self.dag_run_collection_response
TestDagRunOperations
python
getsentry__sentry
src/sentry/auth_v2/endpoints/auth_merge_user_accounts.py
{ "start": 717, "end": 1044 }
class ____(CamelSnakeSerializer): verification_code = serializers.CharField(required=True) ids_to_merge = serializers.ListField(child=serializers.IntegerField(), required=True) ids_to_delete = serializers.ListField(child=serializers.IntegerField(), required=True) @control_silo_endpoint
AuthMergeUserAccountsValidator
python
getsentry__sentry
tests/sentry/integrations/github/test_client.py
{ "start": 68440, "end": 71610 }
class ____(GitHubClientFileBlameBase): """ Tests that rate limits are handled correctly """ def setUp(self) -> None: super().setUp() self.file = SourceLineInfo( path="src/sentry/integrations/github/client_1.py", lineno=10, ref="master", repo=self.repo_1, code_mapping=None, # type: ignore[arg-type] ) responses.reset() responses.add( method=responses.POST, url="https://api.github.com/app/installations/1/access_tokens", body='{"token": "12345token", "expires_at": "2030-01-01T00:00:00Z"}', status=200, content_type="application/json", ) responses.add( method=responses.GET, url="https://api.github.com/rate_limit", body=orjson.dumps( { "resources": { "graphql": { "limit": 5000, "used": 4900, "remaining": 100, "reset": 1613064000, } } } ).decode(), status=200, content_type="application/json", ) @mock.patch("sentry.integrations.github.client.logger.error") @mock.patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1") @responses.activate def test_rate_limit_exceeded(self, get_jwt, mock_logger_error) -> None: with pytest.raises(ApiRateLimitedError): self.github_client.get_blame_for_files([self.file], extra={}) mock_logger_error.assert_called_with( "sentry.integrations.github.get_blame_for_files.rate_limit", extra={ "provider": "github", "specific_resource": "graphql", "remaining": 100, "next_window": "17:20:00", "organization_integration_id": self.github_client.org_integration_id, }, ) @mock.patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1") @responses.activate def test_no_rate_limiting(self, get_jwt) -> None: """ Tests that no error is thrown when GitHub isn't enforcing rate limits """ responses.reset() responses.add( method=responses.POST, url="https://api.github.com/app/installations/1/access_tokens", body='{"token": "12345token", "expires_at": "2030-01-01T00:00:00Z"}', status=200, content_type="application/json", ) responses.add( method=responses.GET, url="https://api.github.com/rate_limit", status=404, ) responses.add( 
method=responses.POST, url="https://api.github.com/graphql", json={ "data": {}, }, content_type="application/json", ) assert self.github_client.get_blame_for_files([self.file], extra={}) == []
GitHubClientFileBlameRateLimitTest
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/oxylabs_web/base.py
{ "start": 697, "end": 4848 }
class ____(BasePydanticReader): """ Scrape any website with Oxylabs Web Scraper API and get results in Markdown format. [See the API documentation](https://developers.oxylabs.io/scraper-apis/web-scraper-api/other-websites) Args: username: Oxylabs API username. password: Oxylabs API password. Example: .. code-block:: python from llama_index.readers.web.oxylabs_web.base import OxylabsWebReader reader = OxylabsWebReader( username=os.environ["OXYLABS_USERNAME"], password=os.environ["OXYLABS_PASSWORD"] ) docs = reader.load_data( [ "https://sandbox.oxylabs.io/products/1", "https://sandbox.oxylabs.io/products/2" ], { "parse": True, } ) print(docs[0].text) """ timeout_s: int = 100 oxylabs_scraper_url: str = "https://realtime.oxylabs.io/v1/queries" api: "RealtimeAPI" async_api: "AsyncAPI" default_config: dict[str, Any] = Field(default_factory=get_default_config) def __init__(self, username: str, password: str, **kwargs) -> None: from oxylabs.internal.api import AsyncAPI, APICredentials, RealtimeAPI credentials = APICredentials(username=username, password=password) bits, _ = architecture() sdk_type = ( f"oxylabs-llama-index-web-sdk-python/" f"{version('llama-index-readers-web')} " f"({python_version()}; {bits})" ) api = RealtimeAPI(credentials, sdk_type=sdk_type) async_api = AsyncAPI(credentials, sdk_type=sdk_type) super().__init__(api=api, async_api=async_api, **kwargs) @classmethod def class_name(cls) -> str: return "OxylabsWebReader" def _get_document_from_response(self, response: dict[str, Any]) -> Document: content = response["results"][0]["content"] if isinstance(content, (dict, list)): text = json_to_markdown(content) else: striped_html = strip_html(str(content)) text = markdownify(striped_html) return Document( metadata={"oxylabs_job": response["job"]}, text=text, ) async def aload_data( self, urls: list[str], additional_params: Optional[Dict[str, Any]] = None, ) -> List[Document]: """ Asynchronously load data from urls. Args: urls: List of URLs to load. 
additional_params: Dictionary of scraper parameters as described [here](https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/generic-target#additional) """ if additional_params is None: additional_params = {} responses = await asyncio.gather( *[ self.async_api.get_response( {**additional_params, "url": url}, self.default_config, ) for url in urls ] ) return [ self._get_document_from_response(response) for response in responses if response ] def load_data( self, urls: list[str], additional_params: Optional[Dict[str, Any]] = None, ) -> List[Document]: """ Load data from urls. Args: urls: List of URLs to load. additional_params: Dictionary of scraper parameters as described [here](https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/generic-target#additional) """ if additional_params is None: additional_params = {} responses = [ self.api.get_response( {**additional_params, "url": url}, self.default_config, ) for url in urls ] return [ self._get_document_from_response(response) for response in responses if response ]
OxylabsWebReader
python
EpistasisLab__tpot
tpot/builtin_modules/nn.py
{ "start": 9516, "end": 11249 }
class ____(PytorchClassifier): """Multilayer Perceptron, implemented in PyTorch, for use with TPOT. """ def __init__( self, num_epochs=10, batch_size=8, learning_rate=0.01, weight_decay=0, verbose=False ): self.num_epochs = num_epochs self.batch_size = batch_size self.learning_rate = learning_rate self.weight_decay = weight_decay self.verbose = verbose self.input_size = None self.num_classes = None self.network = None self.loss_function = None self.optimizer = None self.data_loader = None self.train_dset_len = None self.device = None def _init_model(self, X, y): device = _get_cuda_device_if_available() X, y = self.validate_inputs(X, y) self.input_size = X.shape[-1] self.num_classes = len(set(y)) X = torch.tensor(X, dtype=torch.float32) y = torch.tensor(y, dtype=torch.long) train_dset = TensorDataset(X, y) # Set parameters of the network self.network = _MLP(self.input_size, self.num_classes).to(device) self.loss_function = nn.CrossEntropyLoss() self.optimizer = Adam(self.network.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) self.data_loader = DataLoader( train_dset, batch_size=self.batch_size, shuffle=True, num_workers=2 ) self.train_dset_len = len(train_dset) self.device = device def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.non_deterministic = True tags.target_tags.single_output = True return tags
PytorchMLPClassifier
python
pytorch__pytorch
.github/scripts/test_pytest_caching_utils.py
{ "start": 99, "end": 3157 }
class ____(TestCase): def test_merged_lastfailed_content_with_overlap(self) -> None: last_failed_source = { "tools/tests/test_foo.py::test_num1": True, "tools/tests/test_foo.py::test_num2": True, "tools/tests/test_bar.py::test_num1": True, } last_failed_dest = { "tools/tests/test_foo.py::test_num1": True, "tools/tests/test_car.py::test_num1": True, "tools/tests/test_car.py::test_num2": True, } last_failed_merged = { "tools/tests/test_foo.py::test_num1": True, "tools/tests/test_foo.py::test_num2": True, "tools/tests/test_bar.py::test_num1": True, "tools/tests/test_car.py::test_num1": True, "tools/tests/test_car.py::test_num2": True, } merged = _merged_lastfailed_content(last_failed_source, last_failed_dest) self.assertEqual(merged, last_failed_merged) def test_merged_lastfailed_content_without_overlap(self) -> None: last_failed_source = { "tools/tests/test_foo.py::test_num1": True, "tools/tests/test_foo.py::test_num2": True, "tools/tests/test_bar.py::test_num1": True, } last_failed_dest = { "tools/tests/test_car.py::test_num1": True, "tools/tests/test_car.py::test_num2": True, } last_failed_merged = { "tools/tests/test_foo.py::test_num1": True, "tools/tests/test_foo.py::test_num2": True, "tools/tests/test_bar.py::test_num1": True, "tools/tests/test_car.py::test_num1": True, "tools/tests/test_car.py::test_num2": True, } merged = _merged_lastfailed_content(last_failed_source, last_failed_dest) self.assertEqual(merged, last_failed_merged) def test_merged_lastfailed_content_with_empty_source(self) -> None: last_failed_source = { "": True, } last_failed_dest = { "tools/tests/test_car.py::test_num1": True, "tools/tests/test_car.py::test_num2": True, } last_failed_merged = { "tools/tests/test_car.py::test_num1": True, "tools/tests/test_car.py::test_num2": True, } merged = _merged_lastfailed_content(last_failed_source, last_failed_dest) self.assertEqual(merged, last_failed_merged) def test_merged_lastfailed_content_with_empty_dest(self) -> None: last_failed_source = { 
"tools/tests/test_car.py::test_num1": True, "tools/tests/test_car.py::test_num2": True, } last_failed_dest = { "": True, } last_failed_merged = { "tools/tests/test_car.py::test_num1": True, "tools/tests/test_car.py::test_num2": True, } merged = _merged_lastfailed_content(last_failed_source, last_failed_dest) self.assertEqual(merged, last_failed_merged) if __name__ == "__main__": main()
TestPytestCachingUtils
python
PrefectHQ__prefect
tests/server/orchestration/test_core_policy.py
{ "start": 9285, "end": 11552 }
class ____: @pytest.mark.parametrize( "initial_state_type", [ states.StateType.SCHEDULED, states.StateType.PENDING, ], ) @pytest.mark.parametrize( "proposed_state_type", [ states.StateType.PENDING, states.StateType.RUNNING, ], ) async def test_task_parameters_id_copied_from_scheduled_to_pending( self, session, initialize_orchestration, initial_state_type, proposed_state_type, ): intended_transition = (initial_state_type, proposed_state_type) task_parameters_id = uuid4() ctx = await initialize_orchestration( session, "task", *intended_transition, initial_details={"task_parameters_id": task_parameters_id}, ) async with CopyTaskParametersID(ctx, *intended_transition) as ctx: await ctx.validate_proposed_state() assert ctx.validated_state_type == proposed_state_type assert ( ctx.validated_state.state_details.task_parameters_id == task_parameters_id ) @pytest.mark.parametrize( "initial_state_type", [ states.StateType.SCHEDULED, states.StateType.PENDING, ], ) @pytest.mark.parametrize( "proposed_state_type", [ states.StateType.COMPLETED, states.StateType.FAILED, states.StateType.CANCELLED, states.StateType.CRASHED, ], ) async def test_task_parameters_id_not_copied_for_other_transitions( self, session, initialize_orchestration, initial_state_type, proposed_state_type, ): intended_transition = (initial_state_type, proposed_state_type) ctx = await initialize_orchestration( session, "task", *intended_transition, initial_details={"task_parameters_id": uuid4()}, ) scheduling_rule = CopyTaskParametersID(ctx, *intended_transition) async with scheduling_rule as ctx: await ctx.validate_proposed_state() assert await scheduling_rule.invalid()
TestCopyTaskParametersID
python
walkccc__LeetCode
solutions/2457. Minimum Addition to Make Integer Beautiful/2457.py
{ "start": 0, "end": 431 }
class ____: def makeIntegerBeautiful(self, n: int, target: int) -> int: ans = 0 power = 1 # e.g. n = 123. After tunning off the last bit by adding 7, n = 130. # Effectively, we can think n as 13. That's why we do n = (n / 10) + 1. while sum(map(int, str(n))) > target: # the cost to turn off the last digit ans += power * (10 - n % 10) n = n // 10 + 1 power *= 10 return ans
Solution
python
explosion__spaCy
spacy/pipeline/span_ruler.py
{ "start": 3751, "end": 18993 }
class ____(Pipe): """The SpanRuler lets you add spans to the `Doc.spans` using token-based rules or exact phrase matches. DOCS: https://spacy.io/api/spanruler USAGE: https://spacy.io/usage/rule-based-matching#spanruler """ def __init__( self, nlp: Language, name: str = "span_ruler", *, spans_key: Optional[str] = DEFAULT_SPANS_KEY, spans_filter: Optional[ Callable[[Iterable[Span], Iterable[Span]], Iterable[Span]] ] = None, annotate_ents: bool = False, ents_filter: Callable[ [Iterable[Span], Iterable[Span]], Iterable[Span] ] = util.filter_chain_spans, phrase_matcher_attr: Optional[Union[int, str]] = None, matcher_fuzzy_compare: Callable = levenshtein_compare, validate: bool = False, overwrite: bool = False, scorer: Optional[Callable] = partial( overlapping_labeled_spans_score, spans_key=DEFAULT_SPANS_KEY ), ) -> None: """Initialize the span ruler. If patterns are supplied here, they need to be a list of dictionaries with a `"label"` and `"pattern"` key. A pattern can either be a token pattern (list) or a phrase pattern (string). For example: `{'label': 'ORG', 'pattern': 'Apple'}`. nlp (Language): The shared nlp object to pass the vocab to the matchers and process phrase patterns. name (str): Instance name of the current pipeline component. Typically passed in automatically from the factory when the component is added. Used to disable the current span ruler while creating phrase patterns with the nlp object. spans_key (Optional[str]): The spans key to save the spans under. If `None`, no spans are saved. Defaults to "ruler". spans_filter (Optional[Callable[[Iterable[Span], Iterable[Span]], List[Span]]): The optional method to filter spans before they are assigned to doc.spans. Defaults to `None`. annotate_ents (bool): Whether to save spans to doc.ents. Defaults to `False`. ents_filter (Callable[[Iterable[Span], Iterable[Span]], List[Span]]): The method to filter spans before they are assigned to doc.ents. Defaults to `util.filter_chain_spans`. 
phrase_matcher_attr (Optional[Union[int, str]]): Token attribute to match on, passed to the internal PhraseMatcher as `attr`. Defaults to `None`. matcher_fuzzy_compare (Callable): The fuzzy comparison method for the internal Matcher. Defaults to spacy.matcher.levenshtein.levenshtein_compare. validate (bool): Whether patterns should be validated, passed to Matcher and PhraseMatcher as `validate`. overwrite (bool): Whether to remove any existing spans under this spans key if `spans_key` is set, and/or to remove any ents under `doc.ents` if `annotate_ents` is set. Defaults to `True`. scorer (Optional[Callable]): The scoring method. Defaults to spacy.pipeline.span_ruler.overlapping_labeled_spans_score. DOCS: https://spacy.io/api/spanruler#init """ self.nlp = nlp self.name = name self.spans_key = spans_key self.annotate_ents = annotate_ents self.phrase_matcher_attr = phrase_matcher_attr self.validate = validate self.overwrite = overwrite self.spans_filter = spans_filter self.ents_filter = ents_filter self.scorer = scorer self.matcher_fuzzy_compare = matcher_fuzzy_compare self._match_label_id_map: Dict[int, Dict[str, str]] = {} self.clear() def __len__(self) -> int: """The number of all labels added to the span ruler.""" return len(self._patterns) def __contains__(self, label: str) -> bool: """Whether a label is present in the patterns.""" for label_id in self._match_label_id_map.values(): if label_id["label"] == label: return True return False @property def key(self) -> Optional[str]: """Key of the doc.spans dict to save the spans under.""" return self.spans_key def __call__(self, doc: Doc) -> Doc: """Find matches in document and add them as entities. doc (Doc): The Doc object in the pipeline. RETURNS (Doc): The Doc with added entities, if available. 
DOCS: https://spacy.io/api/spanruler#call """ error_handler = self.get_error_handler() try: matches = self.match(doc) self.set_annotations(doc, matches) return doc except Exception as e: return error_handler(self.name, self, [doc], e) def match(self, doc: Doc): self._require_patterns() with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="\\[W036") matches = cast( List[Tuple[int, int, int]], list(self.matcher(doc)) + list(self.phrase_matcher(doc)), ) deduplicated_matches = set( Span( doc, start, end, label=self._match_label_id_map[m_id]["label"], span_id=self._match_label_id_map[m_id]["id"], ) for m_id, start, end in matches if start != end ) return sorted(list(deduplicated_matches)) def set_annotations(self, doc, matches): """Modify the document in place""" # set doc.spans if spans_key is set if self.key: spans = [] if self.key in doc.spans and not self.overwrite: spans = doc.spans[self.key] spans.extend( self.spans_filter(spans, matches) if self.spans_filter else matches ) doc.spans[self.key] = spans # set doc.ents if annotate_ents is set if self.annotate_ents: spans = [] if not self.overwrite: spans = list(doc.ents) spans = self.ents_filter(spans, matches) try: doc.ents = sorted(spans) except ValueError: raise ValueError(Errors.E854) @property def labels(self) -> Tuple[str, ...]: """All labels present in the match patterns. RETURNS (set): The string labels. DOCS: https://spacy.io/api/spanruler#labels """ return tuple(sorted(set([cast(str, p["label"]) for p in self._patterns]))) @property def ids(self) -> Tuple[str, ...]: """All IDs present in the match patterns. RETURNS (set): The string IDs. DOCS: https://spacy.io/api/spanruler#ids """ return tuple( sorted(set([cast(str, p.get("id")) for p in self._patterns]) - set([None])) ) def initialize( self, get_examples: Callable[[], Iterable[Example]], *, nlp: Optional[Language] = None, patterns: Optional[Sequence[PatternType]] = None, ): """Initialize the pipe for training. 
get_examples (Callable[[], Iterable[Example]]): Function that returns a representative sample of gold-standard Example objects. nlp (Language): The current nlp object the component is part of. patterns (Optional[Iterable[PatternType]]): The list of patterns. DOCS: https://spacy.io/api/spanruler#initialize """ self.clear() if patterns: self.add_patterns(patterns) # type: ignore[arg-type] @property def patterns(self) -> List[PatternType]: """Get all patterns that were added to the span ruler. RETURNS (list): The original patterns, one dictionary per pattern. DOCS: https://spacy.io/api/spanruler#patterns """ return self._patterns def add_patterns(self, patterns: List[PatternType]) -> None: """Add patterns to the span ruler. A pattern can either be a token pattern (list of dicts) or a phrase pattern (string). For example: {'label': 'ORG', 'pattern': 'Apple'} {'label': 'ORG', 'pattern': 'Apple', 'id': 'apple'} {'label': 'GPE', 'pattern': [{'lower': 'san'}, {'lower': 'francisco'}]} patterns (list): The patterns to add. 
DOCS: https://spacy.io/api/spanruler#add_patterns """ # disable the nlp components after this one in case they haven't been # initialized / deserialized yet try: current_index = -1 for i, (name, pipe) in enumerate(self.nlp.pipeline): if self == pipe: current_index = i break subsequent_pipes = [pipe for pipe in self.nlp.pipe_names[current_index:]] except ValueError: subsequent_pipes = [] with self.nlp.select_pipes(disable=subsequent_pipes): phrase_pattern_labels = [] phrase_pattern_texts = [] for entry in patterns: p_label = cast(str, entry["label"]) p_id = cast(str, entry.get("id", "")) label = repr((p_label, p_id)) self._match_label_id_map[self.nlp.vocab.strings.as_int(label)] = { "label": p_label, "id": p_id, } if isinstance(entry["pattern"], str): phrase_pattern_labels.append(label) phrase_pattern_texts.append(entry["pattern"]) elif isinstance(entry["pattern"], list): self.matcher.add(label, [entry["pattern"]]) else: raise ValueError(Errors.E097.format(pattern=entry["pattern"])) self._patterns.append(entry) for label, pattern in zip( phrase_pattern_labels, self.nlp.pipe(phrase_pattern_texts), ): self.phrase_matcher.add(label, [pattern]) def clear(self) -> None: """Reset all patterns. RETURNS: None DOCS: https://spacy.io/api/spanruler#clear """ self._patterns: List[PatternType] = [] self.matcher: Matcher = Matcher( self.nlp.vocab, validate=self.validate, fuzzy_compare=self.matcher_fuzzy_compare, ) self.phrase_matcher: PhraseMatcher = PhraseMatcher( self.nlp.vocab, attr=self.phrase_matcher_attr, validate=self.validate, ) def remove(self, label: str) -> None: """Remove a pattern by its label. label (str): Label of the pattern to be removed. 
RETURNS: None DOCS: https://spacy.io/api/spanruler#remove """ if label not in self: raise ValueError( Errors.E1024.format(attr_type="label", label=label, component=self.name) ) self._patterns = [p for p in self._patterns if p["label"] != label] for m_label in self._match_label_id_map: if self._match_label_id_map[m_label]["label"] == label: m_label_str = self.nlp.vocab.strings.as_string(m_label) if m_label_str in self.phrase_matcher: self.phrase_matcher.remove(m_label_str) if m_label_str in self.matcher: self.matcher.remove(m_label_str) def remove_by_id(self, pattern_id: str) -> None: """Remove a pattern by its pattern ID. pattern_id (str): ID of the pattern to be removed. RETURNS: None DOCS: https://spacy.io/api/spanruler#remove_by_id """ orig_len = len(self) self._patterns = [p for p in self._patterns if p.get("id") != pattern_id] if orig_len == len(self): raise ValueError( Errors.E1024.format( attr_type="ID", label=pattern_id, component=self.name ) ) for m_label in self._match_label_id_map: if self._match_label_id_map[m_label]["id"] == pattern_id: m_label_str = self.nlp.vocab.strings.as_string(m_label) if m_label_str in self.phrase_matcher: self.phrase_matcher.remove(m_label_str) if m_label_str in self.matcher: self.matcher.remove(m_label_str) def _require_patterns(self) -> None: """Raise a warning if this component has no patterns defined.""" if len(self) == 0: warnings.warn(Warnings.W036.format(name=self.name)) def from_bytes( self, bytes_data: bytes, *, exclude: Iterable[str] = SimpleFrozenList() ) -> "SpanRuler": """Load the span ruler from a bytestring. bytes_data (bytes): The bytestring to load. RETURNS (SpanRuler): The loaded span ruler. 
DOCS: https://spacy.io/api/spanruler#from_bytes """ self.clear() deserializers = { "patterns": lambda b: self.add_patterns(srsly.json_loads(b)), } util.from_bytes(bytes_data, deserializers, exclude) return self def to_bytes(self, *, exclude: Iterable[str] = SimpleFrozenList()) -> bytes: """Serialize the span ruler to a bytestring. RETURNS (bytes): The serialized patterns. DOCS: https://spacy.io/api/spanruler#to_bytes """ serializers = { "patterns": lambda: srsly.json_dumps(self.patterns), } return util.to_bytes(serializers, exclude) def from_disk( self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList() ) -> "SpanRuler": """Load the span ruler from a directory. path (Union[str, Path]): A path to a directory. RETURNS (SpanRuler): The loaded span ruler. DOCS: https://spacy.io/api/spanruler#from_disk """ self.clear() path = ensure_path(path) deserializers = { "patterns": lambda p: self.add_patterns(srsly.read_jsonl(p)), } util.from_disk(path, deserializers, {}) return self def to_disk( self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList() ) -> None: """Save the span ruler patterns to a directory. path (Union[str, Path]): A path to a directory. DOCS: https://spacy.io/api/spanruler#to_disk """ path = ensure_path(path) serializers = { "patterns": lambda p: srsly.write_jsonl(p, self.patterns), } util.to_disk(path, serializers, {}) # Setup backwards compatibility hook for factories def __getattr__(name): if name == "make_span_ruler": module = importlib.import_module("spacy.pipeline.factories") return module.make_span_ruler elif name == "make_entity_ruler": module = importlib.import_module("spacy.pipeline.factories") return module.make_future_entity_ruler raise AttributeError(f"module {__name__} has no attribute {name}")
SpanRuler
python
numba__numba
numba/tests/test_typeinfer.py
{ "start": 15987, "end": 18310 }
class ____(unittest.TestCase): """ Tests for typing.Context.resolve_overload(). """ def assert_resolve_overload(self, cases, args, expected): ctx = typing.Context() got = ctx.resolve_overload("foo", cases, args, {}) self.assertEqual(got, expected) def test_non_ambiguous_match(self): def check(args, expected): self.assert_resolve_overload(cases, args, expected) # Order shouldn't matter here self.assert_resolve_overload(cases[::-1], args, expected) cases = [i8(i8, i8), i32(i32, i32), f64(f64, f64)] # Exact match check((i8, i8), cases[0]) check((i32, i32), cases[1]) check((f64, f64), cases[2]) # "Promote" conversion check((i8, i16), cases[1]) check((i32, i8), cases[1]) check((i32, i8), cases[1]) check((f32, f32), cases[2]) # "Safe" conversion check((u32, u32), cases[2]) # "Unsafe" conversion check((i64, i64), cases[2]) def test_ambiguous_match(self): # When the best match is ambiguous (there is a tie), the first # best case in original sequence order should be returned. def check(args, expected, expected_reverse): self.assert_resolve_overload(cases, args, expected) self.assert_resolve_overload(cases[::-1], args, expected_reverse) cases = [i16(i16, i16), i32(i32, i32), f64(f64, f64)] # Two "promote" conversions check((i8, i8), cases[0], cases[1]) # Two "safe" conversions check((u16, u16), cases[1], cases[2]) cases = [i32(i32, i32), f32(f32, f32)] # Two "unsafe" conversions check((u32, u32), cases[0], cases[1]) def test_ambiguous_error(self): ctx = typing.Context() cases = [i16(i16, i16), i32(i32, i32)] with self.assertRaises(TypeError) as raises: ctx.resolve_overload("foo", cases, (i8, i8), {}, allow_ambiguous=False) self.assertEqual(str(raises.exception).splitlines(), ["Ambiguous overloading for foo (int8, int8):", "(int16, int16) -> int16", "(int32, int32) -> int32", ])
TestResolveOverload
python
google__pytype
pytype/tests/test_decorators2.py
{ "start": 4790, "end": 9426 }
class ____(test_base.BaseTest): """Test decorators.""" def test_annotated_super_call_under_bad_decorator(self): self.InferWithErrors(""" class Foo: def Run(self) -> None: ... class Bar(Foo): @bad_decorator # name-error def Run(self): return super(Bar, self).Run() """) def test_replace_self_to_stararg(self): # Without decorator, `self` will be in `signature.param_names`. # But after replacing, `*args` won't be in `signature.param_names`. self.Check(""" from typing import TypeVar T = TypeVar('T') def dec(func): def f(*args: T, **kwargs: T): pass return f class MyClass: @dec def func(self, x): pass x = MyClass() x.func(12) """) def test_instance_as_decorator_error(self): errors = self.CheckWithErrors(""" class Decorate: def __call__(self, func): return func class Foo: @classmethod # not-callable>=3.11 @Decorate # forgot to instantiate Decorate # wrong-arg-count[e]>=3.11 def bar(cls): # wrong-arg-count[e]<3.11 # not-callable<3.11 pass Foo.bar() """) self.assertErrorRegexes(errors, {"e": r"Decorate.*1.*2"}) def test_uncallable_instance_as_decorator(self): errors = self.CheckWithErrors(""" class Decorate: pass # forgot to define __call__ class Foo: @classmethod # not-callable>=3.11 @Decorate # forgot to instantiate Decorate # wrong-arg-count[e1]>=3.11 def bar(cls): # wrong-arg-count[e1]<3.11 # not-callable<3.11 pass Foo.bar() """) self.assertErrorRegexes(errors, {"e1": r"Decorate.*1.*2"}) def test_instance_method_with_annotated_decorator(self): ty = self.Infer(""" from typing import Any, Callable def decorate(f: Callable[[Any, int], int]) -> Callable[[Any, int], int]: return f class Foo: @decorate def f(self, x): return x Foo().f(0) Foo.f(Foo(), 0) """) self.assertTypesMatchPytd( ty, """ from typing import Any, Callable def decorate(f: Callable[[Any, int], int]) -> Callable[[Any, int], int]: ... class Foo: def f(self, _1: int) -> int: ... 
""", ) def test_instance_method_with_unannotated_decorator(self): with self.DepTree([( "lock.py", """ class Lock: def __call__(self, f): def wrapped(a, b): pass return wrapped """, )]): ty = self.Infer(""" import lock class Foo: @lock.Lock() def f(self): pass Foo().f(0) """) self.assertTypesMatchPytd( ty, """ import lock from typing import Any class Foo: def f(self, _1) -> Any: ... """, ) def test_instance_method_from_generic_callable(self): ty = self.Infer(""" from typing import Callable, TypeVar T = TypeVar('T') def decorate(f) -> Callable[[T], T]: return lambda x: x class Foo: @decorate def f(self): pass assert_type(Foo().f(), Foo) """) self.assertTypesMatchPytd( ty, """ from typing import Callable, TypeVar T = TypeVar('T') def decorate(f) -> Callable[[T], T]: ... class Foo: def f(self: T) -> T: ... """, ) def test_typevar_in_decorated_function_in_function(self): self.Check(""" from typing import Any, TypeVar T = TypeVar('T') def decorate(f) -> Any: return f def f_out(x: T) -> T: @decorate def f_in() -> T: return x return x """) def test_typevar_in_decorated_method_in_class(self): self.Check(""" from typing import Any, Generic, TypeVar T = TypeVar('T') def decorate(f) -> Any: return f class C(Generic[T]): @decorate def f(self, x: T): pass """) def test_self_in_decorated_method(self): self.Check(""" from typing import Any def decorate(f) -> Any: return f class C: @decorate def f(self): assert_type(self, C) """) def test_self_in_contextmanager(self): self.CheckWithErrors(""" import contextlib class Foo: @contextlib.contextmanager def ctx(self): print(self.attribute_error) # attribute-error """) if __name__ == "__main__": test_base.main()
DecoratorsTest
python
facebook__pyre-check
api/query.py
{ "start": 1265, "end": 1343 }
class ____: type_name: str start: Position stop: Position
Annotation
python
pytorch__pytorch
test/mobile/test_lite_script_module.py
{ "start": 17846, "end": 21217 }
class ____(QuantizationLiteTestCase): def test_single_layer(self): input = torch.rand(2, 5, dtype=torch.float) quantized_model = self._create_quantized_model( model_class=AnnotatedSingleLayerLinearModel, qengine="qnnpack" ) self._compare_script_and_mobile(model=quantized_model, input=input) def test_two_layer(self): input = torch.rand(2, 5, dtype=torch.float) quantized_model = self._create_quantized_model(model_class=TwoLayerLinearModel) self._compare_script_and_mobile(model=quantized_model, input=input) def test_annotated_nested(self): input = torch.rand(2, 5, dtype=torch.float) quantized_model = self._create_quantized_model( model_class=AnnotatedNestedModel, qengine="qnnpack" ) self._compare_script_and_mobile(model=quantized_model, input=input) def test_quantization_example(self): # From the example in Static Quantization section of https://pytorch.org/docs/stable/quantization.html class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.conv = torch.nn.Conv2d(1, 1, 1) self.relu = torch.nn.ReLU() self.dequant = torch.ao.quantization.DeQuantStub() def forward(self, x): x = self.quant(x) x = self.conv(x) x = self.relu(x) x = self.dequant(x) return x model_fp32 = M() model_fp32.eval() model_fp32.qconfig = torch.ao.quantization.get_default_qconfig("qnnpack") model_fp32_fused = torch.ao.quantization.fuse_modules( model_fp32, [["conv", "relu"]] ) model_fp32_prepared = torch.ao.quantization.prepare(model_fp32_fused) input_fp32 = torch.randn(4, 1, 4, 4) model_fp32_prepared(input_fp32) model_int8 = torch.ao.quantization.convert(model_fp32_prepared) input = torch.randn(4, 1, 4, 4) self._compare_script_and_mobile(model=model_int8, input=input) def test_bundled_input_with_dynamic_type(self): class Model(torch.nn.Module): def forward( self, x: dict[int, torch.Tensor], y: dict[int, torch.Tensor], z: dict[int, torch.Tensor], ): return x model = Model() script_module = torch.jit.script(model) sample_input = { 
script_module.forward: [ ( {0: torch.ones(1)}, {1: torch.ones(1)}, {2: torch.ones(1)}, ) ] } bundled_model = torch.utils.bundled_inputs.bundle_inputs( script_module, sample_input ) buf = bundled_model._save_to_buffer_for_lite_interpreter() mobile_module = _load_for_lite_interpreter(io.BytesIO(buf)) i = mobile_module.run_method("get_all_bundled_inputs") self.assertEqual( i[0], ( {0: torch.ones(1)}, {1: torch.ones(1)}, {2: torch.ones(1)}, ), ) if __name__ == "__main__": run_tests()
TestLiteScriptQuantizedModule
python
redis__redis-py
redis/multidb/healthcheck.py
{ "start": 6128, "end": 10025 }
class ____(HealthCheck): """ Health check available for Redis Enterprise deployments. Verify via REST API that the database is healthy based on different lags. """ def __init__( self, rest_api_port: int = 9443, lag_aware_tolerance: int = DEFAULT_LAG_AWARE_TOLERANCE, timeout: float = DEFAULT_TIMEOUT, auth_basic: Optional[Tuple[str, str]] = None, verify_tls: bool = True, # TLS verification (server) options ca_file: Optional[str] = None, ca_path: Optional[str] = None, ca_data: Optional[Union[str, bytes]] = None, # Mutual TLS (client cert) options client_cert_file: Optional[str] = None, client_key_file: Optional[str] = None, client_key_password: Optional[str] = None, ): """ Initialize LagAwareHealthCheck with the specified parameters. Args: rest_api_port: Port number for Redis Enterprise REST API (default: 9443) lag_aware_tolerance: Tolerance in lag between databases in MS (default: 100) timeout: Request timeout in seconds (default: DEFAULT_TIMEOUT) auth_basic: Tuple of (username, password) for basic authentication verify_tls: Whether to verify TLS certificates (default: True) ca_file: Path to CA certificate file for TLS verification ca_path: Path to CA certificates directory for TLS verification ca_data: CA certificate data as string or bytes client_cert_file: Path to client certificate file for mutual TLS client_key_file: Path to client private key file for mutual TLS client_key_password: Password for encrypted client private key """ self._http_client = HttpClient( timeout=timeout, auth_basic=auth_basic, retry=Retry(NoBackoff(), retries=0), verify_tls=verify_tls, ca_file=ca_file, ca_path=ca_path, ca_data=ca_data, client_cert_file=client_cert_file, client_key_file=client_key_file, client_key_password=client_key_password, ) self._rest_api_port = rest_api_port self._lag_aware_tolerance = lag_aware_tolerance def check_health(self, database) -> bool: if database.health_check_url is None: raise ValueError( "Database health check url is not set. 
Please check DatabaseConfig for the current database." ) if isinstance(database.client, Redis): db_host = database.client.get_connection_kwargs()["host"] else: db_host = database.client.startup_nodes[0].host base_url = f"{database.health_check_url}:{self._rest_api_port}" self._http_client.base_url = base_url # Find bdb matching to the current database host matching_bdb = None for bdb in self._http_client.get("/v1/bdbs"): for endpoint in bdb["endpoints"]: if endpoint["dns_name"] == db_host: matching_bdb = bdb break # In case if the host was set as public IP for addr in endpoint["addr"]: if addr == db_host: matching_bdb = bdb break if matching_bdb is None: logger.warning("LagAwareHealthCheck failed: Couldn't find a matching bdb") raise ValueError("Could not find a matching bdb") url = ( f"/v1/bdbs/{matching_bdb['uid']}/availability" f"?extend_check=lag&availability_lag_tolerance_ms={self._lag_aware_tolerance}" ) self._http_client.get(url, expect_json=False) # Status checked in an http client, otherwise HttpError will be raised return True
LagAwareHealthCheck
python
Lightning-AI__lightning
tests/tests_pytorch/core/test_datamodules.py
{ "start": 14022, "end": 14201 }
class ____(LightningDataModule): def __init__(self, arg0, arg1, kwarg0=None): super().__init__() self.save_hyperparameters() # single arg
DataModuleWithHparams_0
python
PrefectHQ__prefect
src/prefect/client/orchestration/_blocks_schemas/client.py
{ "start": 418, "end": 3386 }
class ____(BaseClient): def create_block_schema(self, block_schema: "BlockSchemaCreate") -> "BlockSchema": """ Create a block schema in the Prefect API. """ try: response = self.request( "POST", "/block_schemas/", json=block_schema.model_dump( mode="json", exclude_unset=True, exclude={"id", "block_type", "checksum"}, ), ) except HTTPStatusError as e: if e.response.status_code == 409: raise ObjectAlreadyExists(http_exc=e) from e else: raise from prefect.client.schemas.objects import BlockSchema return BlockSchema.model_validate(response.json()) def read_block_schema_by_checksum( self, checksum: str, version: str | None = None ) -> "BlockSchema": """ Look up a block schema checksum """ try: response = self.request( "GET", "/block_schemas/checksum/{checksum}", path_params={"checksum": checksum}, **({"params": {"version": version}} if version else {}), ) except HTTPStatusError as e: if e.response.status_code == 404: raise ObjectNotFound(http_exc=e) from e else: raise from prefect.client.schemas.objects import BlockSchema return BlockSchema.model_validate(response.json()) def read_block_schemas(self) -> "list[BlockSchema]": """ Read all block schemas Raises: httpx.RequestError: if a valid block schema was not found Returns: A BlockSchema. """ response = self.request("POST", "/block_schemas/filter", json={}) from prefect.client.schemas.objects import BlockSchema return BlockSchema.model_validate_list(response.json()) def get_most_recent_block_schema_for_block_type( self, block_type_id: "UUID", ) -> "BlockSchema | None": """ Fetches the most recent block schema for a specified block type ID. Args: block_type_id: The ID of the block type. Raises: httpx.RequestError: If the request fails for any reason. Returns: The most recent block schema or None. 
""" try: response = self.request( "POST", "/block_schemas/filter", json={ "block_schemas": {"block_type_id": {"any_": [str(block_type_id)]}}, "limit": 1, }, ) except HTTPStatusError: raise from prefect.client.schemas.objects import BlockSchema return next(iter(BlockSchema.model_validate_list(response.json())), None)
BlocksSchemaClient
python
python__mypy
mypy/nodes.py
{ "start": 82749, "end": 83196 }
class ____(Expression): """Dictionary literal expression {key: value, ...}.""" __slots__ = ("items",) __match_args__ = ("items",) items: list[tuple[Expression | None, Expression]] def __init__(self, items: list[tuple[Expression | None, Expression]]) -> None: super().__init__() self.items = items def accept(self, visitor: ExpressionVisitor[T]) -> T: return visitor.visit_dict_expr(self)
DictExpr
python
spyder-ide__spyder
spyder/plugins/outlineexplorer/main_widget.py
{ "start": 610, "end": 708 }
class ____: Main = 'main_section' DisplayOptions = 'display_options'
OutlineExplorerSections
python
tensorflow__tensorflow
tensorflow/python/data/experimental/ops/readers.py
{ "start": 50365, "end": 51606 }
class ____(dataset_ops.DatasetV1Adapter): """A `Dataset` consisting of the results from a SQL query.""" @functools.wraps(SqlDatasetV2.__init__) def __init__(self, driver_name, data_source_name, query, output_types): wrapped = SqlDatasetV2(driver_name, data_source_name, query, output_types) super(SqlDatasetV1, self).__init__(wrapped) if tf2.enabled(): CsvDataset = CsvDatasetV2 SqlDataset = SqlDatasetV2 make_batched_features_dataset = make_batched_features_dataset_v2 make_csv_dataset = make_csv_dataset_v2 else: CsvDataset = CsvDatasetV1 SqlDataset = SqlDatasetV1 make_batched_features_dataset = make_batched_features_dataset_v1 make_csv_dataset = make_csv_dataset_v1 def _tf2_callback(): global CsvDataset, SqlDataset, make_batched_features_dataset, make_csv_dataset if tf2.enabled(): CsvDataset = CsvDatasetV2 SqlDataset = SqlDatasetV2 make_batched_features_dataset = make_batched_features_dataset_v2 make_csv_dataset = make_csv_dataset_v2 else: CsvDataset = CsvDatasetV1 SqlDataset = SqlDatasetV1 make_batched_features_dataset = make_batched_features_dataset_v1 make_csv_dataset = make_csv_dataset_v1 v2_compat.register_data_v2_callback(_tf2_callback)
SqlDatasetV1
python
ansible__ansible
lib/ansible/modules/apt_repository.py
{ "start": 15900, "end": 30694 }
class ____(SourcesList): # prefer api.launchpad.net over launchpad.net/api # see: https://github.com/ansible/ansible/pull/81978#issuecomment-1767062178 LP_API = 'https://api.launchpad.net/1.0/~%s/+archive/%s' PPA_URI = 'https://ppa.launchpadcontent.net' def __init__(self, module): self.module = module self.codename = module.params['codename'] or distro.codename super(UbuntuSourcesList, self).__init__(module) self.apt_key_bin = self.module.get_bin_path('apt-key', required=False) self.gpg_bin = self.module.get_bin_path('gpg', required=False) if not self.apt_key_bin and not self.gpg_bin: msg = 'Either apt-key or gpg binary is required, but neither could be found.' \ 'The apt-key CLI has been deprecated and removed in modern Debian and derivatives, ' \ 'you might want to use "deb822_repository" instead.' self.module.fail_json(msg) def __deepcopy__(self, memo=None): return UbuntuSourcesList(self.module) def _get_ppa_info(self, owner_name, ppa_name): lp_api = self.LP_API % (owner_name, ppa_name) headers = dict(Accept='application/json') response, info = fetch_url(self.module, lp_api, headers=headers) if info['status'] != 200: self.module.fail_json(msg="failed to fetch PPA information, error was: %s" % info['msg']) return json.loads(to_native(response.read())) def _expand_ppa(self, path): ppa = path.split(':')[1] ppa_owner = ppa.split('/')[0] try: ppa_name = ppa.split('/')[1] except IndexError: ppa_name = 'ppa' line = 'deb %s/%s/%s/ubuntu %s main' % (self.PPA_URI, ppa_owner, ppa_name, self.codename) return line, ppa_owner, ppa_name def _key_already_exists(self, key_fingerprint): if self.apt_key_bin: locale = get_best_parsable_locale(self.module) APT_ENV = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale) self.module.run_command_environ_update = APT_ENV rc, out, err = self.module.run_command([self.apt_key_bin, 'export', key_fingerprint], check_rc=True) found = bool(not err or 'nothing exported' not in err) else: found = 
self._gpg_key_exists(key_fingerprint) return found def _gpg_key_exists(self, key_fingerprint): found = False keyfiles = ['/etc/apt/trusted.gpg'] # main gpg repo for apt for other_dir in APT_KEY_DIRS: # add other known sources of gpg sigs for apt, skip hidden files keyfiles.extend([os.path.join(other_dir, x) for x in os.listdir(other_dir) if not x.startswith('.')]) for key_file in keyfiles: if os.path.exists(key_file): try: rc, out, err = self.module.run_command([self.gpg_bin, '--list-packets', key_file]) except OSError as ex: self.module.debug(f"Could check key against file {key_file!r}: {ex}") continue if key_fingerprint in out: found = True break return found # https://www.linuxuprising.com/2021/01/apt-key-is-deprecated-how-to-add.html def add_source(self, line, comment='', file=None): if line.startswith('ppa:'): source, ppa_owner, ppa_name = self._expand_ppa(line) if source in self.repos_urls: # repository already exists return info = self._get_ppa_info(ppa_owner, ppa_name) # add gpg sig if needed if not self._key_already_exists(info['signing_key_fingerprint']): # TODO: report file that would have been added if not check_mode keyfile = '' if not self.module.check_mode: if self.apt_key_bin: command = [self.apt_key_bin, 'adv', '--recv-keys', '--no-tty', '--keyserver', 'hkp://keyserver.ubuntu.com:80', info['signing_key_fingerprint']] else: # use first available key dir, in order of preference for keydir in APT_KEY_DIRS: if os.path.exists(keydir): break else: self.module.fail_json("Unable to find any existing apt gpgp repo directories, tried the following: %s" % ', '.join(APT_KEY_DIRS)) keyfile = '%s/%s-%s-%s.gpg' % (keydir, os.path.basename(source).replace(' ', '-'), ppa_owner, ppa_name) command = [self.gpg_bin, '--no-tty', '--keyserver', 'hkp://keyserver.ubuntu.com:80', '--export', info['signing_key_fingerprint']] rc, stdout, stderr = self.module.run_command(command, check_rc=True, encoding=None) if keyfile: # using gpg we must write keyfile ourselves if 
len(stdout) == 0: self.module.fail_json(msg='Unable to get required signing key', rc=rc, stderr=stderr, command=command) try: with open(keyfile, 'wb') as f: f.write(stdout) self.module.log('Added repo key "%s" for apt to file "%s"' % (info['signing_key_fingerprint'], keyfile)) except OSError as ex: self.module.fail_json(msg='Unable to add required signing key.', rc=rc, stderr=stderr, error=str(ex), exception=ex) # apt source file file = file or self._suggest_filename('%s_%s' % (line, self.codename)) else: source = self._parse(line, raise_if_invalid_or_disabled=True)[2] file = file or self._suggest_filename(source) self._add_valid_source(source, comment, file) def remove_source(self, line): if line.startswith('ppa:'): source = self._expand_ppa(line)[0] else: source = self._parse(line, raise_if_invalid_or_disabled=True)[2] self._remove_valid_source(source) @property def repos_urls(self): _repositories = [] for parsed_repos in self.files.values(): for parsed_repo in parsed_repos: valid = parsed_repo[1] enabled = parsed_repo[2] source_line = parsed_repo[3] if not valid or not enabled: continue if source_line.startswith('ppa:'): source, ppa_owner, ppa_name = self._expand_ppa(source_line) _repositories.append(source) else: _repositories.append(source_line) return _repositories def revert_sources_list(sources_before, sources_after, sourceslist_before): """Revert the sourcelist files to their previous state.""" # First remove any new files that were created: for filename in set(sources_after.keys()).difference(sources_before.keys()): if os.path.exists(filename): os.remove(filename) # Now revert the existing files to their former state: sourceslist_before.save() def main(): module = AnsibleModule( argument_spec=dict( repo=dict(type='str', required=True), state=dict(type='str', default='present', choices=['absent', 'present']), mode=dict(type='raw'), update_cache=dict(type='bool', default=True, aliases=['update-cache']), update_cache_retries=dict(type='int', default=5), 
update_cache_retry_max_delay=dict(type='int', default=12), filename=dict(type='str'), # This should not be needed, but exists as a failsafe install_python_apt=dict(type='bool', default=True), validate_certs=dict(type='bool', default=True), codename=dict(type='str'), ), supports_check_mode=True, ) params = module.params repo = module.params['repo'] state = module.params['state'] update_cache = module.params['update_cache'] # Note: mode is referenced in SourcesList class via the passed in module (self here) sourceslist = None if not HAVE_PYTHON_APT: # This interpreter can't see the apt Python library- we'll do the following to try and fix that: # 1) look in common locations for system-owned interpreters that can see it; if we find one, respawn under it # 2) finding none, try to install a matching python-apt package for the current interpreter version; # we limit to the current interpreter version to try and avoid installing a whole other Python just # for apt support # 3) if we installed a support package, try to respawn under what we think is the right interpreter (could be # the current interpreter again, but we'll let it respawn anyway for simplicity) # 4) if still not working, return an error and give up (some corner cases not covered, but this shouldn't be # made any more complex than it already is to try and cover more, eg, custom interpreters taking over # system locations) apt_pkg_name = 'python3-apt' if has_respawned(): # this shouldn't be possible; short-circuit early if it happens... 
module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable)) interpreters = ['/usr/bin/python3', '/usr/bin/python'] interpreter = probe_interpreters_for_module(interpreters, 'apt') if interpreter: # found the Python bindings; respawn this module under the interpreter where we found them respawn_module(interpreter) # this is the end of the line for this process, it will exit here once the respawned module has completed # don't make changes if we're in check_mode if module.check_mode: module.fail_json(msg="%s must be installed to use check mode. " "If run normally this module can auto-install it." % apt_pkg_name) if params['install_python_apt']: install_python_apt(module, apt_pkg_name) else: module.fail_json(msg='%s is not installed, and install_python_apt is False' % apt_pkg_name) # try again to find the bindings in common places interpreter = probe_interpreters_for_module(interpreters, 'apt') if interpreter: # found the Python bindings; respawn this module under the interpreter where we found them # NB: respawn is somewhat wasteful if it's this interpreter, but simplifies the code respawn_module(interpreter) # this is the end of the line for this process, it will exit here once the respawned module has completed else: # we've done all we can do; just tell the user it's busted and get out module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable)) if not repo: module.fail_json(msg='Please set argument \'repo\' to a non-empty value') if isinstance(distro, aptsources_distro.Distribution): sourceslist = UbuntuSourcesList(module) else: module.fail_json(msg='Module apt_repository is not supported on target.') sourceslist_before = copy.deepcopy(sourceslist) sources_before = sourceslist.dump() try: if state == 'present': sourceslist.add_source(repo) elif state == 'absent': sourceslist.remove_source(repo) except InvalidSource as ex: module.fail_json(msg='Invalid repository string: %s' % 
to_native(ex)) sources_after = sourceslist.dump() changed = sources_before != sources_after diff = [] sources_added = set() sources_removed = set() if changed: sources_added = set(sources_after.keys()).difference(sources_before.keys()) sources_removed = set(sources_before.keys()).difference(sources_after.keys()) if module._diff: for filename in set(sources_added.union(sources_removed)): diff.append({'before': sources_before.get(filename, ''), 'after': sources_after.get(filename, ''), 'before_header': (filename, '/dev/null')[filename not in sources_before], 'after_header': (filename, '/dev/null')[filename not in sources_after]}) if changed and not module.check_mode: try: err = '' sourceslist.save() if update_cache: update_cache_retries = module.params.get('update_cache_retries') update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay') randomize = secrets.randbelow(1000) / 1000.0 cache = apt.Cache() for retry in range(update_cache_retries): try: cache.update() break except apt.cache.FetchFailedException as fetch_failed_exc: err = fetch_failed_exc module.warn( f"Failed to update cache after {retry + 1} due " f"to {to_native(fetch_failed_exc)} retry, retrying" ) # Use exponential backoff with a max fail count, plus a little bit of randomness delay = 2 ** retry + randomize if delay > update_cache_retry_max_delay: delay = update_cache_retry_max_delay + randomize time.sleep(delay) module.warn(f"Sleeping for {int(round(delay))} seconds, before attempting to update the cache again") else: revert_sources_list(sources_before, sources_after, sourceslist_before) msg = ( f"Failed to update apt cache after {update_cache_retries} retries: " f"{err if err else 'unknown reason'}" ) module.fail_json(msg=msg) except OSError as ex: revert_sources_list(sources_before, sources_after, sourceslist_before) raise module.exit_json(changed=changed, repo=repo, sources_added=sources_added, sources_removed=sources_removed, state=state, diff=diff) if __name__ == '__main__': 
main()
UbuntuSourcesList
python
MongoEngine__mongoengine
mongoengine/context_managers.py
{ "start": 5880, "end": 12184 }
class ____: """Query_counter context manager to get the number of queries. This works by updating the `profiling_level` of the database so that all queries get logged, resetting the db.system.profile collection at the beginning of the context and counting the new entries. This was designed for debugging purpose. In fact it is a global counter so queries issued by other threads/processes can interfere with it Usage: .. code-block:: python class User(Document): name = StringField() with query_counter() as q: user = User(name='Bob') assert q == 0 # no query fired yet user.save() assert q == 1 # 1 query was fired, an 'insert' user_bis = User.objects().first() assert q == 2 # a 2nd query was fired, a 'find_one' Be aware that: - Iterating over large amount of documents (>101) makes pymongo issue `getmore` queries to fetch the next batch of documents (https://www.mongodb.com/docs/manual/tutorial/iterate-a-cursor/#cursor-batches) - Some queries are ignored by default by the counter (killcursors, db.system.indexes) """ def __init__(self, alias=DEFAULT_CONNECTION_NAME): self.db = get_db(alias=alias) self.initial_profiling_level = None self._ctx_query_counter = 0 # number of queries issued by the context self._ignored_query = { "ns": {"$ne": "%s.system.indexes" % self.db.name}, "op": {"$ne": "killcursors"}, # MONGODB < 3.2 "command.killCursors": {"$exists": False}, # MONGODB >= 3.2 } def _turn_on_profiling(self): profile_update_res = self.db.command({"profile": 0}, session=_get_session()) self.initial_profiling_level = profile_update_res["was"] self.db.system.profile.drop() self.db.command({"profile": 2}, session=_get_session()) def _resets_profiling(self): self.db.command({"profile": self.initial_profiling_level}) def __enter__(self): self._turn_on_profiling() return self def __exit__(self, t, value, traceback): self._resets_profiling() def __eq__(self, value): counter = self._get_count() return value == counter def __ne__(self, value): return not self.__eq__(value) def 
__lt__(self, value): return self._get_count() < value def __le__(self, value): return self._get_count() <= value def __gt__(self, value): return self._get_count() > value def __ge__(self, value): return self._get_count() >= value def __int__(self): return self._get_count() def __repr__(self): """repr query_counter as the number of queries.""" return "%s" % self._get_count() def _get_count(self): """Get the number of queries by counting the current number of entries in db.system.profile and substracting the queries issued by this context. In fact everytime this is called, 1 query is issued so we need to balance that """ count = ( count_documents(self.db.system.profile, self._ignored_query) - self._ctx_query_counter ) self._ctx_query_counter += ( 1 # Account for the query we just issued to gather the information ) return count @contextmanager def set_write_concern(collection, write_concerns): combined_concerns = dict(collection.write_concern.document.items()) combined_concerns.update(write_concerns) yield collection.with_options(write_concern=WriteConcern(**combined_concerns)) @contextmanager def set_read_write_concern(collection, write_concerns, read_concerns): combined_write_concerns = dict(collection.write_concern.document.items()) if write_concerns is not None: combined_write_concerns.update(write_concerns) combined_read_concerns = dict(collection.read_concern.document.items()) if read_concerns is not None: combined_read_concerns.update(read_concerns) yield collection.with_options( write_concern=WriteConcern(**combined_write_concerns), read_concern=ReadConcern(**combined_read_concerns), ) def _commit_with_retry(session): while True: try: # Commit uses write concern set at transaction start. session.commit_transaction() break except (ConnectionFailure, OperationFailure) as exc: # Can retry commit if exc.has_error_label("UnknownTransactionCommitResult"): logging.warning( "UnknownTransactionCommitResult, retrying commit operation ..." 
) continue else: # Error during commit raise @contextmanager def run_in_transaction( alias=DEFAULT_CONNECTION_NAME, session_kwargs=None, transaction_kwargs=None ): """run_in_transaction context manager Execute queries within the context in a database transaction. Usage: .. code-block:: python class A(Document): name = StringField() with run_in_transaction(): a_doc = A.objects.create(name="a") a_doc.update(name="b") Be aware that: - Mongo transactions run inside a session which is bound to a connection. If you attempt to execute a transaction across a different connection alias, pymongo will raise an exception. In other words: you cannot create a transaction that crosses different database connections. That said, multiple transaction can be nested within the same session for particular connection. For more information regarding pymongo transactions: https://pymongo.readthedocs.io/en/stable/api/pymongo/client_session.html#transactions """ conn = get_connection(alias) session_kwargs = session_kwargs or {} with conn.start_session(**session_kwargs) as session: transaction_kwargs = transaction_kwargs or {} with session.start_transaction(**transaction_kwargs): try: _set_session(session) yield _commit_with_retry(session) finally: _clear_session()
query_counter
python
django__django
tests/filtered_relation/models.py
{ "start": 3053, "end": 3382 }
class ____(models.Model): book = models.ForeignKey(Book, models.CASCADE, related_name="daily_sales") sale_date = models.DateField() currency = models.ForeignKey(Currency, models.CASCADE) seller = models.ForeignKey(Seller, models.CASCADE) sales = models.DecimalField(max_digits=10, decimal_places=2)
BookDailySales
python
pyca__cryptography
src/cryptography/x509/base.py
{ "start": 3908, "end": 3959 }
class ____(utils.Enum): v1 = 0 v3 = 2
Version
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/genericType38.py
{ "start": 363, "end": 1048 }
class ____(Generic[_T]): def __init__(self, _: ClassA[_T]): ... v1 = ClassA(0) v2 = ClassB(v1) v3 = ClassB(ClassA(0)) reveal_type(v1, expected_text="ClassA[int]") reveal_type(v2, expected_text="ClassB[int]") reveal_type(v3, expected_text="ClassB[int]") def func1(x: list[_T], /) -> list[_T]: return x def func2(any: Any): v1 = list([any]) v2 = func1(v1) v3 = func1(list([any])) reveal_type(v1, expected_text="list[Any]") reveal_type(v2, expected_text="list[Any]") reveal_type(v3, expected_text="list[Any]") def func3(val1: Iterator[Iterable[int]]): val2 = list(chain.from_iterable(val1)) reveal_type(val2, expected_text="list[int]")
ClassB
python
tornadoweb__tornado
tornado/test/gen_test.py
{ "start": 18407, "end": 18599 }
class ____(RequestHandler): @gen.coroutine def prepare(self): yield gen.moment raise HTTPError(403) def get(self): self.finish("ok")
AsyncPrepareErrorHandler
python
scipy__scipy
scipy/stats/_continuous_distns.py
{ "start": 73146, "end": 74600 }
class ____(rv_continuous): r"""A folded Cauchy continuous random variable. %(before_notes)s Notes ----- The probability density function for `foldcauchy` is: .. math:: f(x, c) = \frac{1}{\pi (1+(x-c)^2)} + \frac{1}{\pi (1+(x+c)^2)} for :math:`x \ge 0` and :math:`c \ge 0`. `foldcauchy` takes ``c`` as a shape parameter for :math:`c`. %(example)s """ def _argcheck(self, c): return c >= 0 def _shape_info(self): return [_ShapeInfo("c", False, (0, np.inf), (True, False))] def _rvs(self, c, size=None, random_state=None): return abs(cauchy.rvs(loc=c, size=size, random_state=random_state)) def _pdf(self, x, c): # foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2)) return 1.0/np.pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2)) def _cdf(self, x, c): return 1.0/np.pi*(np.arctan(x-c) + np.arctan(x+c)) def _sf(self, x, c): # 1 - CDF(x, c) = 1 - (atan(x - c) + atan(x + c))/pi # = ((pi/2 - atan(x - c)) + (pi/2 - atan(x + c)))/pi # = (acot(x - c) + acot(x + c))/pi # = (atan2(1, x - c) + atan2(1, x + c))/pi return (np.arctan2(1, x - c) + np.arctan2(1, x + c))/np.pi def _stats(self, c): return np.inf, np.inf, np.nan, np.nan foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
foldcauchy_gen
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-github/llama_index/readers/github/repository/event.py
{ "start": 1070, "end": 1318 }
class ____(BaseEvent): """Event dispatched when file processing starts.""" file_path: str file_type: str @classmethod def class_name(cls) -> str: return "GitHubFileProcessingStartedEvent"
GitHubFileProcessingStartedEvent
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/cloud_storage_transfer_service.py
{ "start": 2562, "end": 5394 }
class ____: """Helper class for preprocess of transfer job body.""" def __init__( self, body: dict, aws_conn_id: str | None = "aws_default", default_schedule: bool = False ) -> None: self.body = body self.aws_conn_id = aws_conn_id self.default_schedule = default_schedule def _inject_aws_credentials(self) -> None: if TRANSFER_SPEC not in self.body: return if AWS_S3_DATA_SOURCE not in self.body[TRANSFER_SPEC]: return if AWS_ROLE_ARN in self.body[TRANSFER_SPEC][AWS_S3_DATA_SOURCE]: return aws_hook = AwsBaseHook(self.aws_conn_id, resource_type="s3") aws_credentials = aws_hook.get_credentials() aws_access_key_id = aws_credentials.access_key aws_secret_access_key = aws_credentials.secret_key self.body[TRANSFER_SPEC][AWS_S3_DATA_SOURCE][AWS_ACCESS_KEY] = { ACCESS_KEY_ID: aws_access_key_id, SECRET_ACCESS_KEY: aws_secret_access_key, } def _reformat_date(self, field_key: str) -> None: schedule = self.body[SCHEDULE] if field_key not in schedule: return if isinstance(schedule[field_key], date): schedule[field_key] = self._convert_date_to_dict(schedule[field_key]) def _reformat_time(self, field_key: str) -> None: schedule = self.body[SCHEDULE] if field_key not in schedule: return if isinstance(schedule[field_key], time): schedule[field_key] = self._convert_time_to_dict(schedule[field_key]) def _reformat_schedule(self) -> None: if SCHEDULE not in self.body: if self.default_schedule: self.body[SCHEDULE] = {SCHEDULE_START_DATE: date.today(), SCHEDULE_END_DATE: date.today()} else: return self._reformat_date(SCHEDULE_START_DATE) self._reformat_date(SCHEDULE_END_DATE) self._reformat_time(START_TIME_OF_DAY) def process_body(self) -> dict: """ Injects AWS credentials into body if needed and reformats schedule information. 
:return: Preprocessed body """ self._inject_aws_credentials() self._reformat_schedule() return self.body @staticmethod def _convert_date_to_dict(field_date: date) -> dict: """Convert native python ``datetime.date`` object to a format supported by the API.""" return {DAY: field_date.day, MONTH: field_date.month, YEAR: field_date.year} @staticmethod def _convert_time_to_dict(time_object: time) -> dict: """Convert native python ``datetime.time`` object to a format supported by the API.""" return {HOURS: time_object.hour, MINUTES: time_object.minute, SECONDS: time_object.second}
TransferJobPreprocessor
python
boto__boto3
tests/unit/resources/test_action.py
{ "start": 6076, "end": 10141 }
class ____(BaseTestCase): def setUp(self): super().setUp() self.action_def = {'request': {'operation': 'GetFrobs', 'params': []}} @property def model(self): return Action('test', self.action_def, {}) def test_batch_action_gets_pages_from_collection(self): collection = mock.Mock() collection.pages.return_value = [] action = BatchAction(self.model) action(collection) collection.pages.assert_called_with() def test_batch_action_creates_parameters_from_items(self): self.action_def['request']['params'] = [ {'target': 'Bucket', 'source': 'data', 'path': 'BucketName'}, { 'target': 'Delete.Objects[].Key', 'source': 'data', 'path': 'Key', }, ] client = mock.Mock() item1 = mock.Mock() item1.meta = ResourceMeta( 'test', client=client, data={'BucketName': 'bucket', 'Key': 'item1'}, ) item2 = mock.Mock() item2.meta = ResourceMeta( 'test', client=client, data={'BucketName': 'bucket', 'Key': 'item2'}, ) collection = mock.Mock() collection.pages.return_value = [[item1, item2]] action = BatchAction(self.model) action(collection) client.get_frobs.assert_called_with( Bucket='bucket', Delete={'Objects': [{'Key': 'item1'}, {'Key': 'item2'}]}, ) @mock.patch( 'boto3.resources.action.create_request_parameters', return_value={} ) def test_batch_action_skips_operation(self, crp_mock): # In this test we have an item from the collection, but no # parameters are set up. Because of this, we do NOT call # the batch operation. client = mock.Mock() item = mock.Mock() item.meta = ResourceMeta('test', client=client) collection = mock.Mock() collection.pages.return_value = [[item]] model = self.model action = BatchAction(model) action(collection) crp_mock.assert_called_with(item, model.request, params={}, index=0) client.get_frobs.assert_not_called() @mock.patch('boto3.resources.action.create_request_parameters') def test_batch_action_calls_operation(self, crp_mock): # In this test we have an item and parameters, so the call # to the batch operation should be made. 
def side_effect(resource, model, params=None, index=None): params['foo'] = 'bar' crp_mock.side_effect = side_effect client = mock.Mock() item = mock.Mock() item.meta = ResourceMeta('test', client=client) collection = mock.Mock() collection.pages.return_value = [[item]] model = self.model action = BatchAction(model) action(collection) # Here the call is made with params={}, but they are edited # in-place so we need to compare to the final edited value. crp_mock.assert_called_with( item, model.request, params={'foo': 'bar'}, index=0 ) client.get_frobs.assert_called_with(foo='bar') @mock.patch('boto3.resources.action.create_request_parameters') def test_batch_action_with_positional_argument(self, crp_mock): def side_effect(resource, model, params=None, index=None): params['foo'] = 'bar' def _api_call(*args, **kwargs): if args: raise TypeError("get_frobs() only accepts keyword arguments.") crp_mock.side_effect = side_effect client = mock.Mock() client.get_frobs = _api_call item = mock.Mock() item.meta = ResourceMeta('test', client=client) collection = mock.Mock() collection.pages.return_value = [[item]] model = self.model action = BatchAction(model) with pytest.raises(TypeError): action(collection, 'item1')
TestBatchActionCall
python
huggingface__transformers
src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py
{ "start": 58903, "end": 61775 }
class ____(nn.Module): def __init__(self, config: Phi4MultimodalConfig): super().__init__() self.config = config self.layer_idx = config.audio_config.feature_layer self.drop = nn.Dropout(config.embd_pdrop) self.encoder = Phi4MultimodalAudioModel._from_config(config.audio_config) self.up_proj_for_speech = nn.Linear( config.audio_config.hidden_size * config.audio_config.downsample_rate, config.hidden_size ) self.down_proj_for_speech = nn.Linear(config.hidden_size, config.hidden_size) self.up_proj_for_vision_speech = nn.Linear( config.audio_config.hidden_size * config.audio_config.downsample_rate, config.hidden_size ) self.down_proj_for_vision_speech = nn.Linear(config.hidden_size, config.hidden_size) def forward( self, input_ids: torch.LongTensor, inputs_embeds: torch.Tensor, audio_input_features: torch.FloatTensor, audio_embed_sizes=None, audio_attention_mask=None, audio_projection_mode="speech", ) -> torch.FloatTensor: with torch.no_grad(): positions_tuple = torch.nonzero(input_ids == self.config.audio_config.audio_token_id, as_tuple=True) up_proj = self.up_proj_for_speech if audio_projection_mode == "speech" else self.up_proj_for_vision_speech down_proj = ( self.down_proj_for_speech if audio_projection_mode == "speech" else self.down_proj_for_vision_speech ) target_device = up_proj.bias.device target_dtype = up_proj.bias.dtype audio_input_features = audio_input_features.to(device=target_device, dtype=target_dtype) audio_encoder_hidden_states = self.encoder(audio_input_features, audio_attention_mask) audio_encoder_hidden_states = up_proj(audio_encoder_hidden_states) audio_encoder_hidden_states = nn.functional.gelu(audio_encoder_hidden_states) audio_embeds = down_proj(audio_encoder_hidden_states) merged_audio_embeds = torch.cat( [audio_embeds[i, : audio_embed_sizes[i], :] for i in range(len(audio_embed_sizes))], dim=0 ) merged_audio_embeds = merged_audio_embeds.to(dtype=inputs_embeds.dtype, device=inputs_embeds.device) # Temporarily disable autocast to avoid issue 
on bf16 tensors # Ref: https://github.com/pytorch/pytorch/issues/132715 with torch.autocast(device_type=inputs_embeds.device.type, enabled=False): audio_embeds = inputs_embeds.index_put( indices=positions_tuple, values=merged_audio_embeds, accumulate=False ) audio_embeds = self.drop(audio_embeds) return audio_embeds #################################################### TEXT ####################################################
Phi4MultimodalAudioEmbedding
python
apache__thrift
lib/py/test/test_sslsocket.py
{ "start": 1752, "end": 3591 }
class ____(threading.Thread): def __init__(self, server, expect_failure=False): super(ServerAcceptor, self).__init__() self.daemon = True self._server = server self._listening = threading.Event() self._port = None self._port_bound = threading.Event() self._client = None self._client_accepted = threading.Event() self._expect_failure = expect_failure frame = inspect.stack(3)[2] self.name = frame[3] del frame def run(self): self._server.listen() self._listening.set() try: address = self._server.handle.getsockname() if len(address) > 1: # AF_INET addresses are 2-tuples (host, port) and AF_INET6 are # 4-tuples (host, port, ...), but in each case port is in the second slot. self._port = address[1] finally: self._port_bound.set() try: self._client = self._server.accept() if self._client: data = self._client.read(5) # hello/sleep if data == b"sleep": time.sleep(2) self._client.write(b"there") except Exception: logging.exception('error on server side (%s):' % self.name) if not self._expect_failure: raise finally: self._client_accepted.set() def await_listening(self): self._listening.wait() @property def port(self): self._port_bound.wait() return self._port @property def client(self): self._client_accepted.wait() return self._client def close(self): if self._client: self._client.close() self._server.close() # Python 2.6 compat
ServerAcceptor
python
getsentry__sentry
tests/sentry/integrations/slack/webhooks/actions/test_enable_notifications.py
{ "start": 403, "end": 4615 }
class ____(BaseEventTest): def setUp(self) -> None: super().setUp() self.slack_id = "UXXXXXXX1" self.team_id = "TXXXXXXX1" def test_enable_all_slack_no_identity(self) -> None: with assume_test_silo_mode(SiloMode.CONTROL): Identity.objects.delete_identity( user=self.user, idp=self.idp, external_id=self.external_id, ) response = self.post_webhook( action_data=[{"name": "enable_notifications", "value": "all_slack"}] ) assert response.status_code == 200, response.content assert response.data["text"] == NO_IDENTITY_MESSAGE def test_enable_all_slack_already_enabled(self) -> None: provider = self.create_notification_settings_provider( user_id=self.user.id, scope_type="user", scope_identifier=self.user.id, type="alerts", provider="slack", value="never", ) response = self.post_webhook( action_data=[{"name": "enable_notifications", "value": "all_slack"}] ) assert response.status_code == 200, response.content assert response.data["text"] == ENABLE_SLACK_SUCCESS_MESSAGE self.user.refresh_from_db() # Reload to fetch actor provider.refresh_from_db() assert provider.value == "always" def test_enable_all_slack(self) -> None: with assume_test_silo_mode(SiloMode.CONTROL): assert not NotificationSettingProvider.objects.all().exists() response = self.post_webhook( action_data=[{"name": "enable_notifications", "value": "all_slack"}] ) self.user.refresh_from_db() # Reload to fetch actor assert response.status_code == 200, response.content assert response.data["text"] == ENABLE_SLACK_SUCCESS_MESSAGE with assume_test_silo_mode(SiloMode.CONTROL): provider = NotificationSettingProvider.objects.get( user_id=self.user.id, scope_type="user", scope_identifier=self.user.id, type="alerts", provider="slack", ) assert provider.value == "always" def test_enable_all_slack_block_kit(self) -> None: with assume_test_silo_mode(SiloMode.CONTROL): assert not NotificationSettingProvider.objects.all().exists() original_message = { "blocks": [ { "type": "section", "text": { "type": "mrkdwn", "text": "Check 
your email lately? We didn't think so. Get Sentry notifications in Slack.", }, }, { "type": "actions", "elements": [ { "type": "button", "text": { "type": "plain_text", "text": "Turn on personal notifications", }, "action_id": "enable_notifications", "value": "all_slack", } ], }, ] } response = self.post_webhook_block_kit( action_data=[{"name": "enable_notifications", "value": "all_slack"}], original_message=original_message, data={"callback_id": orjson.dumps({"enable_notifications": True}).decode()}, ) self.user.refresh_from_db() # Reload to fetch actor assert response.status_code == 200, response.content assert response.data["text"] == ENABLE_SLACK_SUCCESS_MESSAGE with assume_test_silo_mode(SiloMode.CONTROL): provider = NotificationSettingProvider.objects.get( user_id=self.user.id, scope_type="user", scope_identifier=self.user.id, type="alerts", provider="slack", ) assert provider.value == "always"
EnableNotificationsActionTest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-twilio/components.py
{ "start": 2917, "end": 3881 }
class ____(StateMigration): """ Migrates legacy `alerts` state to low-code shape. Previously, the stream incorrectly used per partition state. Initial: { "states" : [ { "partition" : {}, "cursor" : { "date_generated" : "2025-08-05T16:43:50Z" } } ] } Final: { "date_generated" : "2025-08-05T16:43:50Z" } """ def migrate(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]: return stream_state["states"][0]["cursor"] def should_migrate(self, stream_state: Mapping[str, Any]) -> bool: if ( stream_state and "states" in stream_state and stream_state["states"] and "cursor" in stream_state["states"][0] and "date_generated" in stream_state["states"][0]["cursor"] ): return True return False
TwilioAlertsStateMigration
python
pytorch__pytorch
test/distributed/fsdp/test_fsdp_core.py
{ "start": 14699, "end": 16200 }
class ____(FSDPTest): @skip_if_lt_x_gpu(2) @parametrize("mixed_precision", [True, False]) def test_transformer_no_grad(self, mixed_precision): """Tests that for an FSDP-wrapped transformer model with shared parameters, after training for one iteration, running a forward pass in ``eval()`` mode gives the same output as running a forward pass in ``torch.no_grad()``.""" fsdp_kwargs = {"device_id": device_type.type} if mixed_precision: fsdp_kwargs["mixed_precision"] = MixedPrecision( param_dtype=torch.float16, reduce_dtype=torch.float16, buffer_dtype=torch.float16, ) else: fsdp_kwargs["mixed_precision"] = None fsdp_model = TransformerWithSharedParams.init( self.process_group, FSDPInitMode.RECURSIVE, DEVICEInitMode.DEVICE_AFTER, fsdp_kwargs, ) self._train_for_several_steps( fsdp_model, num_steps=1, autocast=False, mixed_precision=fsdp_kwargs["mixed_precision"], ) input = fsdp_model.module.get_input(device_type) # Run a forward in eval mode fsdp_model.eval() ref_output = fsdp_model(*input) # Run a forward in `no_grad()` and compare with torch.no_grad(): no_grad_output = fsdp_model(*input) self.assertEqual(ref_output, no_grad_output)
TestNoGrad
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1070295, "end": 1082840 }
class ____(sgqlc.types.Type, Node): """A branch protection rule.""" __schema__ = github_schema __field_names__ = ( "allows_deletions", "allows_force_pushes", "blocks_creations", "branch_protection_rule_conflicts", "bypass_force_push_allowances", "bypass_pull_request_allowances", "creator", "database_id", "dismisses_stale_reviews", "is_admin_enforced", "lock_allows_fetch_and_merge", "lock_branch", "matching_refs", "pattern", "push_allowances", "repository", "require_last_push_approval", "required_approving_review_count", "required_deployment_environments", "required_status_check_contexts", "required_status_checks", "requires_approving_reviews", "requires_code_owner_reviews", "requires_commit_signatures", "requires_conversation_resolution", "requires_deployments", "requires_linear_history", "requires_status_checks", "requires_strict_status_checks", "restricts_pushes", "restricts_review_dismissals", "review_dismissal_allowances", ) allows_deletions = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="allowsDeletions") """Can this branch be deleted.""" allows_force_pushes = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="allowsForcePushes") """Are force pushes allowed on this branch.""" blocks_creations = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="blocksCreations") """Is branch creation a protected operation.""" branch_protection_rule_conflicts = sgqlc.types.Field( sgqlc.types.non_null(BranchProtectionRuleConflictConnection), graphql_name="branchProtectionRuleConflicts", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) """A list of conflicts matching branches protection rule and other branch protection rules Arguments: * `after` (`String`): Returns the 
elements in the list that come after the specified cursor. * `before` (`String`): Returns the elements in the list that come before the specified cursor. * `first` (`Int`): Returns the first _n_ elements from the list. * `last` (`Int`): Returns the last _n_ elements from the list. """ bypass_force_push_allowances = sgqlc.types.Field( sgqlc.types.non_null(BypassForcePushAllowanceConnection), graphql_name="bypassForcePushAllowances", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) """A list of actors able to force push for this branch protection rule. Arguments: * `after` (`String`): Returns the elements in the list that come after the specified cursor. * `before` (`String`): Returns the elements in the list that come before the specified cursor. * `first` (`Int`): Returns the first _n_ elements from the list. * `last` (`Int`): Returns the last _n_ elements from the list. """ bypass_pull_request_allowances = sgqlc.types.Field( sgqlc.types.non_null(BypassPullRequestAllowanceConnection), graphql_name="bypassPullRequestAllowances", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) """A list of actors able to bypass PRs for this branch protection rule. Arguments: * `after` (`String`): Returns the elements in the list that come after the specified cursor. * `before` (`String`): Returns the elements in the list that come before the specified cursor. * `first` (`Int`): Returns the first _n_ elements from the list. 
* `last` (`Int`): Returns the last _n_ elements from the list. """ creator = sgqlc.types.Field(Actor, graphql_name="creator") """The actor who created this branch protection rule.""" database_id = sgqlc.types.Field(Int, graphql_name="databaseId") """Identifies the primary key from the database.""" dismisses_stale_reviews = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="dismissesStaleReviews") """Will new commits pushed to matching branches dismiss pull request review approvals. """ is_admin_enforced = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isAdminEnforced") """Can admins overwrite branch protection.""" lock_allows_fetch_and_merge = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="lockAllowsFetchAndMerge") """Whether users can pull changes from upstream when the branch is locked. Set to `true` to allow fork syncing. Set to `false` to prevent fork syncing. """ lock_branch = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="lockBranch") """Whether to set the branch as read-only. If this is true, users will not be able to push to the branch. """ matching_refs = sgqlc.types.Field( sgqlc.types.non_null(RefConnection), graphql_name="matchingRefs", args=sgqlc.types.ArgDict( ( ("query", sgqlc.types.Arg(String, graphql_name="query", default=None)), ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) """Repository refs that are protected by this rule Arguments: * `query` (`String`): Filters refs with query on name * `after` (`String`): Returns the elements in the list that come after the specified cursor. * `before` (`String`): Returns the elements in the list that come before the specified cursor. * `first` (`Int`): Returns the first _n_ elements from the list. 
* `last` (`Int`): Returns the last _n_ elements from the list. """ pattern = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="pattern") """Identifies the protection rule pattern.""" push_allowances = sgqlc.types.Field( sgqlc.types.non_null(PushAllowanceConnection), graphql_name="pushAllowances", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) """A list push allowances for this branch protection rule. Arguments: * `after` (`String`): Returns the elements in the list that come after the specified cursor. * `before` (`String`): Returns the elements in the list that come before the specified cursor. * `first` (`Int`): Returns the first _n_ elements from the list. * `last` (`Int`): Returns the last _n_ elements from the list. 
""" repository = sgqlc.types.Field("Repository", graphql_name="repository") """The repository associated with this branch protection rule.""" require_last_push_approval = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="requireLastPushApproval") """Whether the most recent push must be approved by someone other than the person who pushed it """ required_approving_review_count = sgqlc.types.Field(Int, graphql_name="requiredApprovingReviewCount") """Number of approving reviews required to update matching branches.""" required_deployment_environments = sgqlc.types.Field(sgqlc.types.list_of(String), graphql_name="requiredDeploymentEnvironments") """List of required deployment environments that must be deployed successfully to update matching branches """ required_status_check_contexts = sgqlc.types.Field(sgqlc.types.list_of(String), graphql_name="requiredStatusCheckContexts") """List of required status check contexts that must pass for commits to be accepted to matching branches. """ required_status_checks = sgqlc.types.Field( sgqlc.types.list_of(sgqlc.types.non_null(RequiredStatusCheckDescription)), graphql_name="requiredStatusChecks" ) """List of required status checks that must pass for commits to be accepted to matching branches. 
""" requires_approving_reviews = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="requiresApprovingReviews") """Are approving reviews required to update matching branches.""" requires_code_owner_reviews = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="requiresCodeOwnerReviews") """Are reviews from code owners required to update matching branches.""" requires_commit_signatures = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="requiresCommitSignatures") """Are commits required to be signed.""" requires_conversation_resolution = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="requiresConversationResolution") """Are conversations required to be resolved before merging.""" requires_deployments = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="requiresDeployments") """Does this branch require deployment to specific environments before merging """ requires_linear_history = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="requiresLinearHistory") """Are merge commits prohibited from being pushed to this branch.""" requires_status_checks = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="requiresStatusChecks") """Are status checks required to update matching branches.""" requires_strict_status_checks = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="requiresStrictStatusChecks") """Are branches required to be up to date before merging.""" restricts_pushes = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="restrictsPushes") """Is pushing to matching branches restricted.""" restricts_review_dismissals = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="restrictsReviewDismissals") """Is dismissal of pull request reviews restricted.""" review_dismissal_allowances = sgqlc.types.Field( sgqlc.types.non_null(ReviewDismissalAllowanceConnection), graphql_name="reviewDismissalAllowances", args=sgqlc.types.ArgDict( ( ("after", 
sgqlc.types.Arg(String, graphql_name="after", default=None)), ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) """A list review dismissal allowances for this branch protection rule. Arguments: * `after` (`String`): Returns the elements in the list that come after the specified cursor. * `before` (`String`): Returns the elements in the list that come before the specified cursor. * `first` (`Int`): Returns the first _n_ elements from the list. * `last` (`Int`): Returns the last _n_ elements from the list. """
BranchProtectionRule
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/dataflow.py
{ "start": 30294, "end": 38385 }
class ____(GoogleCloudBaseOperator): """ Launch a Dataflow YAML job and return the result. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:DataflowStartYamlJobOperator` .. warning:: This operator requires ``gcloud`` command (Google Cloud SDK) must be installed on the Airflow worker <https://cloud.google.com/sdk/docs/install>`__ :param job_name: Required. The unique name to assign to the Cloud Dataflow job. :param yaml_pipeline_file: Required. Path to a file defining the YAML pipeline to run. Must be a local file or a URL beginning with 'gs://'. :param region: Optional. Region ID of the job's regional endpoint. Defaults to 'us-central1'. :param project_id: Required. The ID of the GCP project that owns the job. If set to ``None`` or missing, the default project_id from the GCP connection is used. :param gcp_conn_id: Optional. The connection ID used to connect to GCP. :param append_job_name: Optional. Set to True if a unique suffix has to be appended to the `job_name`. Defaults to True. :param drain_pipeline: Optional. Set to True if you want to stop a streaming pipeline job by draining it instead of canceling when killing the task instance. Note that this does not work for batch pipeline jobs or in the deferrable mode. Defaults to False. For more info see: https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline :param deferrable: Optional. Run operator in the deferrable mode. :param expected_terminal_state: Optional. The expected terminal state of the Dataflow job at which the operator task is set to succeed. Defaults to 'JOB_STATE_DONE' for the batch jobs and 'JOB_STATE_RUNNING' for the streaming jobs. :param poll_sleep: Optional. The time in seconds to sleep between polling Google Cloud Platform for the Dataflow job status. Used both for the sync and deferrable mode. :param cancel_timeout: Optional. 
How long (in seconds) operator should wait for the pipeline to be successfully canceled when the task is being killed. :param jinja_variables: Optional. A dictionary of Jinja2 variables to be used in reifying the yaml pipeline file. :param options: Optional. Additional gcloud or Beam job parameters. It must be a dictionary with the keys matching the optional flag names in gcloud. The list of supported flags can be found at: `https://cloud.google.com/sdk/gcloud/reference/dataflow/yaml/run`. Note that if a flag does not require a value, then its dictionary value must be either True or None. For example, the `--log-http` flag can be passed as {'log-http': True}. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). :return: Dictionary containing the job's data. 
""" template_fields: Sequence[str] = ( "job_name", "yaml_pipeline_file", "jinja_variables", "options", "region", "project_id", "gcp_conn_id", ) template_fields_renderers = { "jinja_variables": "json", } operator_extra_links = (DataflowJobLink(),) def __init__( self, *, job_name: str, yaml_pipeline_file: str, region: str = DEFAULT_DATAFLOW_LOCATION, project_id: str = PROVIDE_PROJECT_ID, gcp_conn_id: str = "google_cloud_default", append_job_name: bool = True, drain_pipeline: bool = False, deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), poll_sleep: int = 10, cancel_timeout: int | None = 5 * 60, expected_terminal_state: str | None = None, jinja_variables: dict[str, str] | None = None, options: dict[str, Any] | None = None, impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.job_name = job_name self.yaml_pipeline_file = yaml_pipeline_file self.region = region self.project_id = project_id self.gcp_conn_id = gcp_conn_id self.append_job_name = append_job_name self.drain_pipeline = drain_pipeline self.deferrable = deferrable self.poll_sleep = poll_sleep self.cancel_timeout = cancel_timeout self.expected_terminal_state = expected_terminal_state self.options = options self.jinja_variables = jinja_variables self.impersonation_chain = impersonation_chain self.job_id: str | None = None def execute(self, context: Context) -> dict[str, Any]: self.job_id = self.hook.launch_beam_yaml_job( job_name=self.job_name, yaml_pipeline_file=self.yaml_pipeline_file, append_job_name=self.append_job_name, options=self.options, jinja_variables=self.jinja_variables, project_id=self.project_id, location=self.region, ) DataflowJobLink.persist( context=context, project_id=self.project_id, region=self.region, job_id=self.job_id ) if self.deferrable: self.defer( trigger=DataflowStartYamlJobTrigger( job_id=self.job_id, project_id=self.project_id, location=self.region, gcp_conn_id=self.gcp_conn_id, 
poll_sleep=self.poll_sleep, cancel_timeout=self.cancel_timeout, expected_terminal_state=self.expected_terminal_state, impersonation_chain=self.impersonation_chain, ), method_name=GOOGLE_DEFAULT_DEFERRABLE_METHOD_NAME, ) self.hook.wait_for_done( job_name=self.job_name, location=self.region, project_id=self.project_id, job_id=self.job_id ) job = self.hook.get_job(job_id=self.job_id, location=self.region, project_id=self.project_id) return job def execute_complete(self, context: Context, event: dict) -> dict[str, Any]: """Execute after the trigger returns an event.""" if event["status"] in ("error", "stopped"): self.log.info("status: %s, msg: %s", event["status"], event["message"]) raise AirflowException(event["message"]) job = event["job"] self.log.info("Job %s completed with response %s", job["id"], event["message"]) context["task_instance"].xcom_push(key="job_id", value=job["id"]) return job def on_kill(self): """ Cancel the dataflow job if a task instance gets killed. This method will not be called if a task instance is killed in a deferred state. """ self.log.info("On kill called.") if self.job_id: self.hook.cancel_job( job_id=self.job_id, project_id=self.project_id, location=self.region, ) @cached_property def hook(self) -> DataflowHook: return DataflowHook( gcp_conn_id=self.gcp_conn_id, poll_sleep=self.poll_sleep, impersonation_chain=self.impersonation_chain, drain_pipeline=self.drain_pipeline, cancel_timeout=self.cancel_timeout, expected_terminal_state=self.expected_terminal_state, )
DataflowStartYamlJobOperator
python
pyqtgraph__pyqtgraph
pyqtgraph/graphicsItems/DateAxisItem.py
{ "start": 2639, "end": 4587 }
class ____: """ Specifies the properties for a set of date ticks and computes ticks within a given utc timestamp range """ def __init__(self, spacing, stepper, format, autoSkip=None): """ ============= ========================================================== Arguments spacing approximate (average) tick spacing stepper a stepper function that takes a utc time stamp and a step steps number n to compute the start of the next unit. You can use the makeXStepper functions to create common steppers. format a strftime compatible format string which will be used to convert tick locations to date/time strings autoSkip list of step size multipliers to be applied when the tick density becomes too high. The tick spec automatically applies additional powers of 10 (10, 100, ...) to the list if necessary. Set to None to switch autoSkip off ============= ========================================================== """ self.spacing = spacing self.step = stepper self.format = format self.autoSkip = autoSkip def makeTicks(self, minVal, maxVal, minSpc): ticks = [] n = self.skipFactor(minSpc) x = self.step(minVal, n, first=True) while x <= maxVal: ticks.append(x) x = self.step(x, n, first=False) return (np.array(ticks), n) def skipFactor(self, minSpc): if self.autoSkip is None or minSpc < self.spacing: return 1 factors = np.array(self.autoSkip, dtype=np.float64) while True: for f in factors: spc = self.spacing * f if spc > minSpc: return int(f) factors *= 10
TickSpec
python
pytest-dev__pytest
src/_pytest/outcomes.py
{ "start": 1870, "end": 1990 }
class ____(OutcomeException): """Raised from an explicit call to pytest.fail().""" __module__ = "builtins"
Failed
python
google__pytype
pytype/tests/test_cmp2.py
{ "start": 2727, "end": 3385 }
class ____(test_base.BaseTest): """Tests comparisons on class objects with a custom metaclass.""" def test_compare_types(self): # See b/205755440 - this is the wrong error message to be raising, and the # test should fail once the bug is fixed. For now we test that we don't # crash due to b/205333186. self.CheckWithErrors(""" class Meta(type): def __gt__(self, other): return True # return self.__name__ > other.__name__ class A(metaclass=Meta): pass class B(metaclass=Meta): pass print(A > B) # missing-parameter """) if __name__ == "__main__": test_base.main()
MetaclassTest
python
openai__openai-python
src/openai/types/responses/input_token_count_response.py
{ "start": 200, "end": 310 }
class ____(BaseModel): input_tokens: int object: Literal["response.input_tokens"]
InputTokenCountResponse
python
tiangolo__fastapi
tests/test_union_forms.py
{ "start": 192, "end": 250 }
class ____(BaseModel): name: str email: str
UserForm
python
chroma-core__chroma
chromadb/utils/embedding_functions/bm25_embedding_function.py
{ "start": 464, "end": 8321 }
class ____(SparseEmbeddingFunction[Documents]): def __init__( self, avg_len: Optional[float] = None, task: Optional[TaskType] = "document", cache_dir: Optional[str] = None, k: Optional[float] = None, b: Optional[float] = None, language: Optional[str] = None, token_max_length: Optional[int] = None, disable_stemmer: Optional[bool] = None, specific_model_path: Optional[str] = None, query_config: Optional[Bm25EmbeddingFunctionQueryConfig] = None, **kwargs: Any, ): """Initialize SparseEncoderEmbeddingFunction. Args: avg_len(float, optional): The average length of the documents in the corpus. task (str, optional): Task to perform, can be "document" or "query" cache_dir (str, optional): The path to the cache directory. k (float, optional): The k parameter in the BM25 formula. Defines the saturation of the term frequency. b (float, optional): The b parameter in the BM25 formula. Defines the importance of the document length. language (str, optional): Specifies the language for the stemmer. token_max_length (int, optional): The maximum length of the tokens. disable_stemmer (bool, optional): Disable the stemmer. specific_model_path (str, optional): The path to the specific model. query_config (dict, optional): Configuration for the query, can be "task" **kwargs: Additional arguments to pass to the Bm25 model. """ warnings.warn( "Bm25EmbeddingFunction is deprecated. Please use ChromaBm25EmbeddingFunction instead.", DeprecationWarning, stacklevel=2, ) try: from fastembed.sparse.bm25 import Bm25 except ImportError: raise ValueError( "The fastembed python package is not installed. 
Please install it with `pip install fastembed`" ) self.task = task self.query_config = query_config self.cache_dir = cache_dir self.k = k self.b = b self.avg_len = avg_len self.language = language self.token_max_length = token_max_length self.disable_stemmer = disable_stemmer self.specific_model_path = specific_model_path for key, value in kwargs.items(): if not isinstance(value, (str, int, float, bool, list, dict, tuple)): raise ValueError(f"Keyword argument {key} is not a primitive type") self.kwargs = kwargs bm25_kwargs = { "model_name": "Qdrant/bm25", } optional_params = { "cache_dir": cache_dir, "k": k, "b": b, "avg_len": avg_len, "language": language, "token_max_length": token_max_length, "disable_stemmer": disable_stemmer, "specific_model_path": specific_model_path, } for key, value in optional_params.items(): if value is not None: bm25_kwargs[key] = value bm25_kwargs.update({k: v for k, v in kwargs.items() if v is not None}) self._model = Bm25(**bm25_kwargs) def __call__(self, input: Documents) -> SparseVectors: """Generate embeddings for the given documents. Args: input: Documents to generate embeddings for. Returns: Embeddings for the documents. """ try: from fastembed.sparse.bm25 import Bm25 except ImportError: raise ValueError( "The fastembed python package is not installed. Please install it with `pip install fastembed`" ) model = cast(Bm25, self._model) if self.task == "document": embeddings = model.embed( list(input), ) elif self.task == "query": embeddings = model.query_embed( list(input), ) else: raise ValueError(f"Invalid task: {self.task}") sparse_vectors: SparseVectors = [] for vec in embeddings: sparse_vectors.append( normalize_sparse_vector( indices=vec.indices.tolist(), values=vec.values.tolist() ) ) return sparse_vectors def embed_query(self, input: Documents) -> SparseVectors: try: from fastembed.sparse.bm25 import Bm25 except ImportError: raise ValueError( "The fastembed python package is not installed. 
Please install it with `pip install fastembed`" ) model = cast(Bm25, self._model) if self.query_config is not None: task = self.query_config.get("task") if task == "document": embeddings = model.embed( list(input), ) elif task == "query": embeddings = model.query_embed( list(input), ) else: raise ValueError(f"Invalid task: {task}") sparse_vectors: SparseVectors = [] for vec in embeddings: sparse_vectors.append( normalize_sparse_vector( indices=vec.indices.tolist(), values=vec.values.tolist() ) ) return sparse_vectors else: return self.__call__(input) @staticmethod def name() -> str: return "bm25" @staticmethod def build_from_config( config: Dict[str, Any] ) -> "SparseEmbeddingFunction[Documents]": task = config.get("task") query_config = config.get("query_config") cache_dir = config.get("cache_dir") k = config.get("k") b = config.get("b") avg_len = config.get("avg_len") language = config.get("language") token_max_length = config.get("token_max_length") disable_stemmer = config.get("disable_stemmer") specific_model_path = config.get("specific_model_path") kwargs = config.get("kwargs", {}) return Bm25EmbeddingFunction( task=task, query_config=query_config, cache_dir=cache_dir, k=k, b=b, avg_len=avg_len, language=language, token_max_length=token_max_length, disable_stemmer=disable_stemmer, specific_model_path=specific_model_path, **kwargs, ) def get_config(self) -> Dict[str, Any]: return { "task": self.task, "query_config": self.query_config, "cache_dir": self.cache_dir, "k": self.k, "b": self.b, "avg_len": self.avg_len, "language": self.language, "token_max_length": self.token_max_length, "disable_stemmer": self.disable_stemmer, "specific_model_path": self.specific_model_path, "kwargs": self.kwargs, } def validate_config_update( self, old_config: Dict[str, Any], new_config: Dict[str, Any] ) -> None: # Users should be able to change the path if needed, so we should not validate that. # e.g. 
moving file path from /v1/my-model.bin to /v2/my-model.bin return @staticmethod def validate_config(config: Dict[str, Any]) -> None: """ Validate the configuration using the JSON schema. Args: config: Configuration to validate Raises: ValidationError: If the configuration does not match the schema """ validate_config_schema(config, "bm25")
Bm25EmbeddingFunction
python
catalyst-team__catalyst
tests/catalyst/runners/test_reid.py
{ "start": 1863, "end": 5789 }
class ____(dl.SupervisedRunner): """ReidCustomRunner for reid case""" def handle_batch(self, batch: Dict[str, torch.Tensor]) -> None: """ Process batch Args: batch: batch data """ if self.is_train_loader: images, targets = batch["features"].float(), batch["targets"].long() features = self.model(images) self.batch = { "embeddings": features, "targets": targets, } else: images, targets, cids, is_query = ( batch["features"].float(), batch["targets"].long(), batch["cids"].long(), batch["is_query"].bool(), ) features = self.model(images) self.batch = { "embeddings": features, "targets": targets, "cids": cids, "is_query": is_query, } @pytest.mark.parametrize( "input_key,target_key,keys", ( ( "inputs_test", "logits_test", {"inputs_test": "inputs_test", "logits_test": "logits_test"}, ), ( ["test_1", "test_2", "test_3"], ["test_4"], { "test_1": "test_1", "test_2": "test_2", "test_3": "test_3", "test_4": "test_4", }, ), ( {"test_1": "test_2", "test_3": "test_4"}, ["test_5"], {"test_1": "test_2", "test_3": "test_4", "test_5": "test_5"}, ), ( {"test_1": "test_2", "test_3": "test_4"}, {"test_5": "test_6", "test_7": "test_8"}, { "test_1": "test_2", "test_3": "test_4", "test_5": "test_6", "test_7": "test_8", }, ), ), ) def test_format_keys( input_key: Union[str, Iterable[str], Dict[str, str]], target_key: Union[str, Iterable[str], Dict[str, str]], keys: Dict[str, str], ) -> None: """Check MetricCallback converts keys correctly""" accuracy = AccuracyMetric() callback = dl.BatchMetricCallback( metric=accuracy, input_key=input_key, target_key=target_key ) assert callback._keys == keys def test_classification_pipeline(): """ Test if classification pipeline can run and compute metrics. In this test we check that BatchMetricCallback works with AccuracyMetric (ICallbackBatchMetric). 
""" x = torch.rand(NUM_SAMPLES, NUM_FEATURES) y = (torch.rand(NUM_SAMPLES) * NUM_CLASSES).long() dataset = TensorDataset(x, y) loader = DataLoader(dataset, batch_size=64, num_workers=1) model = DummyModel(num_features=NUM_FEATURES, num_classes=NUM_CLASSES) criterion = torch.nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters()) runner = dl.SupervisedRunner( input_key="features", output_key="logits", target_key="targets" ) with TemporaryDirectory() as logdir: runner.train( model=model, criterion=criterion, optimizer=optimizer, loaders=OrderedDict({"train": loader, "valid": loader}), logdir=logdir, num_epochs=3, verbose=False, valid_loader="valid", valid_metric="loss", minimize_valid_metric=True, callbacks=OrderedDict( { "classification": dl.BatchMetricCallback( metric=AccuracyMetric(num_classes=NUM_CLASSES), input_key="logits", target_key="targets", ), } ), ) assert "accuracy01" in runner.batch_metrics assert "accuracy01" in runner.loader_metrics
ReIDCustomRunner
python
pennersr__django-allauth
allauth/socialaccount/providers/twitch/views.py
{ "start": 280, "end": 1600 }
class ____(OAuth2Adapter): provider_id = "twitch" access_token_url = "https://id.twitch.tv/oauth2/token" # nosec authorize_url = "https://id.twitch.tv/oauth2/authorize" profile_url = "https://api.twitch.tv/helix/users" def complete_login(self, request, app, token, **kwargs): headers = { "Authorization": "Bearer {}".format(token.token), "Client-ID": app.client_id, } response = ( get_adapter().get_requests_session().get(self.profile_url, headers=headers) ) data = response.json() if response.status_code >= HTTPStatus.BAD_REQUEST: error = data.get("error", "") message = data.get("message", "") raise OAuth2Error("Twitch API Error: %s (%s)" % (error, message)) try: user_info = data.get("data", [])[0] except IndexError: raise OAuth2Error("Invalid data from Twitch API: %s" % (data)) if "id" not in user_info: raise OAuth2Error("Invalid data from Twitch API: %s" % (user_info)) return self.get_provider().sociallogin_from_response(request, user_info) oauth2_login = OAuth2LoginView.adapter_view(TwitchOAuth2Adapter) oauth2_callback = OAuth2CallbackView.adapter_view(TwitchOAuth2Adapter)
TwitchOAuth2Adapter