language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
pypa__warehouse
tests/common/db/oidc.py
{ "start": 2521, "end": 3005 }
class ____(WarehouseFactory): class Meta: model = PendingGooglePublisher id = factory.Faker("uuid4", cast_to=None) project_name = "fake-nonexistent-project" # TODO: Replace when factory_boy supports `unique`. # See https://github.com/FactoryBoy/factory_boy/pull/997 email = factory.Sequence(lambda _: fake.unique.safe_email()) sub = factory.Faker("pystr", max_chars=12) added_by = factory.SubFactory(UserFactory)
PendingGooglePublisherFactory
python
faif__python-patterns
patterns/behavioral/specification.py
{ "start": 1015, "end": 1431 }
class ____(CompositeSpecification): def __init__(self, one: "Specification", other: "Specification") -> None: self._one: Specification = one self._other: Specification = other def is_satisfied_by(self, candidate: Union["User", str]) -> bool: return bool( self._one.is_satisfied_by(candidate) and self._other.is_satisfied_by(candidate) )
AndSpecification
python
readthedocs__readthedocs.org
readthedocs/api/v2/views/model_views.py
{ "start": 21008, "end": 21311 }
class ____(viewsets.ReadOnlyModelViewSet): permission_classes = [IsOwner] renderer_classes = (JSONRenderer,) serializer_class = SocialAccountSerializer model = SocialAccount def get_queryset(self): return self.model.objects.filter(user=self.request.user.pk)
SocialAccountViewSet
python
facelessuser__soupsieve
tests/test_level3/test_nth_child.py
{ "start": 121, "end": 5663 }
class ____(util.TestCase): """Test `nth` child selectors.""" def test_nth_child(self): """Test `nth` child.""" markup = """ <body> <p id="0"></p> <p id="1"></p> <span id="2"></span> <span id="3"></span> <span id="4"></span> <span id="5"></span> <span id="6"></span> <p id="7"></p> <p id="8"></p> <p id="9"></p> <p id="10"></p> <span id="11"></span> </body> """ self.assert_selector( markup, "p:nth-child(-2)", [], flags=util.HTML ) self.assert_selector( markup, "p:nth-child(2)", ['1'], flags=util.HTML ) self.assert_selector( markup, "p:NTH-CHILD(2)", ['1'], flags=util.HTML ) self.assert_selector( markup, r"p:NT\H-CH\ILD(2)", ['1'], flags=util.HTML ) def test_nth_child_odd(self): """Test `nth` child odd.""" markup = """ <body> <p id="0"></p> <p id="1"></p> <span id="2"></span> <span id="3"></span> <span id="4"></span> <span id="5"></span> <span id="6"></span> <p id="7"></p> <p id="8"></p> <p id="9"></p> <p id="10"></p> <span id="11"></span> </body> """ self.assert_selector( markup, "p:nth-child(odd)", ['0', '8', '10'], flags=util.HTML ) self.assert_selector( markup, "p:nth-child(ODD)", ['0', '8', '10'], flags=util.HTML ) def test_nth_child_even(self): """Test `nth` child even.""" markup = """ <body> <p id="0"></p> <p id="1"></p> <span id="2"></span> <span id="3"></span> <span id="4"></span> <span id="5"></span> <span id="6"></span> <p id="7"></p> <p id="8"></p> <p id="9"></p> <p id="10"></p> <span id="11"></span> </body> """ self.assert_selector( markup, "p:nth-child(even)", ['1', '7', '9'], flags=util.HTML ) self.assert_selector( markup, "p:nth-child(EVEN)", ['1', '7', '9'], flags=util.HTML ) def test_nth_child_complex(self): """Test `nth` child complex.""" markup = """ <body> <p id="0"></p> <p id="1"></p> <span id="2"></span> <span id="3"></span> <span id="4"></span> <span id="5"></span> <span id="6"></span> <p id="7"></p> <p id="8"></p> <p id="9"></p> <p id="10"></p> <span id="11"></span> </body> """ self.assert_selector( markup, "p:nth-child(2n-5)", ['0', '8', 
'10'], flags=util.HTML ) self.assert_selector( markup, "p:nth-child(2N-5)", ['0', '8', '10'], flags=util.HTML ) self.assert_selector( markup, "p:nth-child(-2n+20)", ['1', '7', '9'], flags=util.HTML ) self.assert_selector( markup, "p:nth-child(50n-20)", [], flags=util.HTML ) self.assert_selector( markup, "p:nth-child(-2n-2)", [], flags=util.HTML ) self.assert_selector( markup, "p:nth-child(9n - 1)", ['7'], flags=util.HTML ) self.assert_selector( markup, "p:nth-child(2n + 1)", ['0', '8', '10'], flags=util.HTML ) self.assert_selector( markup, "p:nth-child(-n+3)", ['0', '1'], flags=util.HTML ) self.assert_selector( markup, "span:nth-child(-n+3)", ['2'], flags=util.HTML ) self.assert_selector( markup, "body *:nth-child(-n+3)", ['0', '1', '2'], flags=util.HTML ) def test_nth_child_no_parent(self): """Test `nth` child with no parent.""" markup = """ <body> <p id="0"></p> <p id="1"></p> <span id="2"></span> <span id="3"></span> <span id="4"></span> <span id="5"></span> <span id="6"></span> <p id="7"></p> <p id="8"></p> <p id="9"></p> <p id="10"></p> <span id="11"></span> </body> """ for parser in util.available_parsers('html.parser', 'lxml', 'html5lib'): # Paragraph is the root. There is no document. markup = """<p id="1">text</p>""" soup = self.soup(markup, parser) fragment = soup.p.extract() self.assertTrue(sv.match("p:nth-child(1)", fragment, flags=sv.DEBUG)) def test_nth_child_with_bad_parameters(self): """Test that pseudo class fails with bad parameters (basically it doesn't match).""" self.assert_raises(':nth-child(a)', SelectorSyntaxError)
TestNthChild
python
automl__auto-sklearn
autosklearn/metalearning/metafeatures/metafeatures.py
{ "start": 5525, "end": 5993 }
class ____(HelperFunction): def _calculate(self, X, y, logger, feat_type): missing = pd.isna(X) return missing def _calculate_sparse(self, X, y, logger, feat_type): data = [True if not np.isfinite(x) else False for x in X.data] missing = X.__class__((data, X.indices, X.indptr), shape=X.shape, dtype=bool) return missing @metafeatures.define("NumberOfInstancesWithMissingValues", dependency="MissingValues")
MissingValues
python
airbytehq__airbyte
airbyte-integrations/connectors/source-rki-covid/source_rki_covid/source.py
{ "start": 20666, "end": 21539 }
class ____(ByStateRkiCovidStream): """Docs: https://api.corona-zahlen.org/germany/states/history/recovered/:days""" primary_key = None def __init__(self, config, **kwargs): super().__init__(**kwargs) self.start_date = config.get("start_date") def date_to_int(self, start_date) -> int: diff = datetime.now() - datetime.strptime(start_date, "%Y-%m-%d") if diff.days <= 0: return 1 return diff.days def path( self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None ) -> str: if self.start_date: return "states/history/recovered/" + str(self.date_to_int(self.start_date)) return "states/history/recovered/" # source: states/history/hospitalization/:days | FULL-REFRESH
StatesHistoryRecovered
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_scatter15.py
{ "start": 315, "end": 1423 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_scatter15.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "scatter"}) chart.axis_ids = [58843520, 58845440] data = [ ["X", 1, 3], ["Y", 10, 30], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) chart.add_series( { "categories": "=Sheet1!$A$2:$A$3", "values": "=Sheet1!$B$2:$B$3", } ) chart.set_x_axis( {"name": "=Sheet1!$A$1", "name_font": {"italic": 1, "baseline": -1}} ) chart.set_y_axis({"name": "=Sheet1!$B$1"}) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
Lightning-AI__lightning
src/lightning/pytorch/profilers/pytorch.py
{ "start": 1811, "end": 4074 }
class ____: """While profiling autograd operations, this class will add labels for module names around the forward function. The Lightning PyTorch Profiler will activate this feature automatically. It can be deactivated as follows: Example:: from lightning.pytorch.profilers import PyTorchProfiler profiler = PyTorchProfiler(record_module_names=False) Trainer(profiler=profiler) It can be used outside of Lightning as follows: Example:: from lightning.pytorch import Trainer, seed_everything with RegisterRecordFunction(model): out = model(batch) """ def __init__(self, model: nn.Module) -> None: self._model = model self._records: dict[str, record_function] = {} self._handles: dict[str, list[RemovableHandle]] = {} def _start_recording_forward(self, _: nn.Module, input: Tensor, record_name: str) -> Tensor: # Add [pl][module] in name for pytorch profiler to recognize record = record_function("[pl][module]" + record_name) record.__enter__() self._records[record_name] = record return input def _stop_recording_forward(self, _: nn.Module, __: Tensor, output: Tensor, record_name: str) -> Tensor: self._records[record_name].__exit__(None, None, None) return output def __enter__(self) -> None: for module_name, module in self._model.named_modules(): if module_name: full_name = f"{type(module).__module__}.{type(module).__name__}" record_name = f"{full_name}: {module_name}" pre_forward_handle = module.register_forward_pre_hook( partial(self._start_recording_forward, record_name=record_name) ) post_forward_handle = module.register_forward_hook( partial(self._stop_recording_forward, record_name=record_name) ) self._handles[module_name] = [pre_forward_handle, post_forward_handle] def __exit__(self, type: Any, value: Any, traceback: Any) -> None: for handles in self._handles.values(): for h in handles: h.remove() self._handles = {}
RegisterRecordFunction
python
ray-project__ray
python/ray/serve/llm/openai_api_models.py
{ "start": 1990, "end": 2258 }
class ____(_CompletionResponse): """CompletionResponse is the response body for the completion API. This model is compatible with vLLM's OpenAI API models. """ pass EmbeddingRequest = _EmbeddingRequest @PublicAPI(stability="alpha")
CompletionResponse
python
pypa__warehouse
warehouse/macaroons/caveats/__init__.py
{ "start": 2238, "end": 2756 }
class ____(Caveat): project_ids: list[StrictStr] def verify(self, request: Request, context: Any, permission: str) -> Result: if not isinstance(context, Project): return Failure("project-scoped token used outside of a project context") if str(context.id) not in self.project_ids: return Failure( f"project-scoped token is not valid for project: {context.name!r}" ) return Success() @as_caveat(tag=3) @dataclass(frozen=True)
ProjectID
python
pytorch__pytorch
torch/profiler/_memory_profiler.py
{ "start": 1517, "end": 1578 }
class ____: device: torch.device @dataclasses.dataclass
Key
python
psf__black
tests/data/miscellaneous/force_pyi.py
{ "start": 673, "end": 808 }
class ____(A, C): ... def spam() -> None: ... @overload def spam(arg: str) -> str: ... var: int = 1 def eggs() -> Union[str, int]: ...
F
python
scikit-learn__scikit-learn
sklearn/preprocessing/_data.py
{ "start": 76334, "end": 80862 }
class ____(OneToOneFeatureMixin, TransformerMixin, BaseEstimator): """Binarize data (set feature values to 0 or 1) according to a threshold. Values greater than the threshold map to 1, while values less than or equal to the threshold map to 0. With the default threshold of 0, only positive values map to 1. Binarization is a common operation on text count data where the analyst can decide to only consider the presence or absence of a feature rather than a quantified number of occurrences for instance. It can also be used as a pre-processing step for estimators that consider boolean random variables (e.g. modelled using the Bernoulli distribution in a Bayesian setting). Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- threshold : float, default=0.0 Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : bool, default=True Set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). Attributes ---------- n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- binarize : Equivalent function without the estimator API. KBinsDiscretizer : Bin continuous data into intervals. OneHotEncoder : Encode categorical features as a one-hot numeric array. Notes ----- If the input is a sparse matrix, only the non-zero values are subject to update by the :class:`Binarizer` class. This estimator is :term:`stateless` and does not need to be fitted. However, we recommend to call :meth:`fit_transform` instead of :meth:`transform`, as parameter validation is only performed in :meth:`fit`. 
Examples -------- >>> from sklearn.preprocessing import Binarizer >>> X = [[ 1., -1., 2.], ... [ 2., 0., 0.], ... [ 0., 1., -1.]] >>> transformer = Binarizer().fit(X) # fit does nothing. >>> transformer Binarizer() >>> transformer.transform(X) array([[1., 0., 1.], [1., 0., 0.], [0., 1., 0.]]) """ _parameter_constraints: dict = { "threshold": [Real], "copy": ["boolean"], } def __init__(self, *, threshold=0.0, copy=True): self.threshold = threshold self.copy = copy @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y=None): """Only validates estimator's parameters. This method allows to: (i) validate the estimator's parameters and (ii) be consistent with the scikit-learn transformer API. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data. y : None Ignored. Returns ------- self : object Fitted transformer. """ validate_data(self, X, accept_sparse="csr") return self def transform(self, X, copy=None): """Binarize each element of X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to binarize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. copy : bool Copy the input X or not. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array. """ copy = copy if copy is not None else self.copy # TODO: This should be refactored because binarize also calls # check_array X = validate_data( self, X, accept_sparse=["csr", "csc"], force_writeable=True, copy=copy, reset=False, ) return binarize(X, threshold=self.threshold, copy=False) def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.requires_fit = False tags.array_api_support = True tags.input_tags.sparse = True return tags
Binarizer
python
getsentry__sentry
tests/sentry/releases/endpoints/test_project_releases.py
{ "start": 4775, "end": 12638 }
class ____(APITestCase): def setUp(self) -> None: self.login_as(user=self.user) self.datetime = datetime(2013, 8, 13, 3, 8, 24, tzinfo=UTC) team = self.create_team() project1 = self.create_project(teams=[team], name="foo") project2 = self.create_project(teams=[team], name="bar") env1 = self.make_environment("prod", project1) env2 = self.make_environment("staging", project2) env3 = self.make_environment("test", project1) release1 = Release.objects.create( organization_id=project1.organization_id, version="1", date_added=self.datetime ) release1.add_project(project1) ReleaseProjectEnvironment.objects.create( release_id=release1.id, project_id=project1.id, environment_id=env1.id, first_seen=self.datetime, last_seen=self.datetime, new_issues_count=1, ) release2 = Release.objects.create( organization_id=project2.organization_id, version="2", date_added=self.datetime ) release2.add_project(project2) ReleaseProjectEnvironment.objects.create( release_id=release2.id, project_id=project2.id, environment_id=env2.id, first_seen=self.datetime, last_seen=self.datetime + timedelta(seconds=60), new_issues_count=6, ) release3 = Release.objects.create( organization_id=project1.organization_id, version="3", date_added=self.datetime, date_released=self.datetime, ) release3.add_project(project1) ReleaseProjectEnvironment.objects.create( release_id=release3.id, project_id=project1.id, environment_id=env3.id, first_seen=self.datetime, last_seen=self.datetime + timedelta(days=20), new_issues_count=2, ) release4 = Release.objects.create(organization_id=project2.organization_id, version="4") release4.add_project(project2) self.project1 = project1 self.project2 = project2 self.release1 = release1 self.release2 = release2 self.release3 = release3 self.release4 = release4 self.env1 = env1 self.env2 = env2 self.env3 = env3 def make_environment(self, name, project): env = Environment.objects.create(organization_id=project.organization_id, name=name) env.add_project(project) return env def 
assert_releases(self, response, releases): assert response.status_code == 200, response.content assert len(response.data) == len(releases) response_versions = sorted(r["version"] for r in response.data) releases_versions = sorted(r.version for r in releases) assert response_versions == releases_versions def assert_release_details(self, release, new_issues_count, first_seen, last_seen): assert release["newGroups"] == new_issues_count assert release["firstEvent"] == first_seen assert release["lastEvent"] == last_seen def test_environments_filter(self) -> None: url = reverse( "sentry-api-0-project-releases", kwargs={ "organization_id_or_slug": self.project1.organization.slug, "project_id_or_slug": self.project1.slug, }, ) response = self.client.get(url + "?environment=" + self.env1.name, format="json") self.assert_releases(response, [self.release1]) response = self.client.get(url + "?environment=" + self.env2.name, format="json") self.assert_releases(response, []) response = self.client.get(url + "?environment=" + self.env3.name, format="json") self.assert_releases(response, [self.release3]) url = reverse( "sentry-api-0-project-releases", kwargs={ "organization_id_or_slug": self.project2.organization.slug, "project_id_or_slug": self.project2.slug, }, ) response = self.client.get(url + "?environment=" + self.env2.name, format="json") self.assert_releases(response, [self.release2]) def test_all_environments(self) -> None: url = reverse( "sentry-api-0-project-releases", kwargs={ "organization_id_or_slug": self.project1.organization.slug, "project_id_or_slug": self.project1.slug, }, ) response = self.client.get(url, format="json") self.assert_releases(response, [self.release1, self.release3]) def test_invalid_environment(self) -> None: url = reverse( "sentry-api-0-project-releases", kwargs={ "organization_id_or_slug": self.project1.organization.slug, "project_id_or_slug": self.project1.slug, }, ) response = self.client.get(url + "?environment=" + "invalid_environment", 
format="json") self.assert_releases(response, []) def test_new_issues_last_seen_first_seen(self) -> None: def sort_releases_by_version(releases): return sorted(releases, key=lambda release: release["version"]) url = reverse( "sentry-api-0-project-releases", kwargs={ "organization_id_or_slug": self.project1.organization.slug, "project_id_or_slug": self.project1.slug, }, ) ReleaseProjectEnvironment.objects.create( release_id=self.release1.id, project_id=self.project1.id, environment_id=self.env3.id, first_seen=self.datetime + timedelta(seconds=120), last_seen=self.datetime + timedelta(seconds=700), new_issues_count=7, ) # TODO(LB): This is testing all environments but it will not work # given what I did with the release serializer # it will instead rely on tagstore. Not sure how to fix this. # response = self.client.get(url, format='json') # self.assert_releases(response, [self.release1, self.release3]) # releases = sort_releases_by_version(response.data) # self.assert_release_details( # release=releases[0], # new_issues_count=8, # first_seen=self.datetime, # last_seen=self.datetime + timedelta(seconds=700), # ) # self.assert_release_details( # release=releases[1], # new_issues_count=2, # first_seen=self.datetime, # last_seen=self.datetime + timedelta(days=20), # ) response = self.client.get(url + "?environment=" + self.env1.name, format="json") self.assert_releases(response, [self.release1]) releases = sort_releases_by_version(response.data) self.assert_release_details( release=releases[0], new_issues_count=1, first_seen=self.datetime, last_seen=self.datetime, ) response = self.client.get(url + "?environment=" + self.env3.name, format="json") self.assert_releases(response, [self.release1, self.release3]) releases = sort_releases_by_version(response.data) self.assert_release_details( release=releases[0], new_issues_count=7, first_seen=self.datetime + timedelta(seconds=120), last_seen=self.datetime + timedelta(seconds=700), ) self.assert_release_details( 
release=releases[1], new_issues_count=2, first_seen=self.datetime, last_seen=self.datetime + timedelta(days=20), )
ProjectReleaseListEnvironmentsTest
python
plotly__plotly.py
plotly/graph_objs/indicator/gauge/threshold/_line.py
{ "start": 233, "end": 3013 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "indicator.gauge.threshold" _path_str = "indicator.gauge.threshold.line" _valid_props = {"color", "width"} @property def color(self): """ Sets the color of the threshold line. The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def width(self): """ Sets the width (in px) of the threshold line. The 'width' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["width"] @width.setter def width(self, val): self["width"] = val @property def _prop_descriptions(self): return """\ color Sets the color of the threshold line. width Sets the width (in px) of the threshold line. """ def __init__(self, arg=None, color=None, width=None, **kwargs): """ Construct a new Line object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.indicator.gaug e.threshold.Line` color Sets the color of the threshold line. width Sets the width (in px) of the threshold line. 
Returns ------- Line """ super().__init__("line") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.indicator.gauge.threshold.Line constructor must be a dict or an instance of :class:`plotly.graph_objs.indicator.gauge.threshold.Line`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("color", arg, color) self._set_property("width", arg, width) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Line
python
django__django
tests/flatpages_tests/test_views.py
{ "start": 5199, "end": 7123 }
class ____(TestDataMixin, TestCase): def test_redirect_view_flatpage(self): "A flatpage can be served through a view and should add a slash" response = self.client.get("/flatpage_root/flatpage") self.assertRedirects(response, "/flatpage_root/flatpage/", status_code=301) def test_redirect_view_non_existent_flatpage(self): """ A nonexistent flatpage raises 404 when served through a view and should not add a slash. """ response = self.client.get("/flatpage_root/no_such_flatpage") self.assertEqual(response.status_code, 404) def test_redirect_fallback_flatpage(self): """ A fallback flatpage won't be served if the middleware is disabled and should not add a slash. """ response = self.client.get("/flatpage") self.assertEqual(response.status_code, 404) def test_redirect_fallback_non_existent_flatpage(self): """ A nonexistent flatpage won't be served if the fallback middleware is disabled and should not add a slash. """ response = self.client.get("/no_such_flatpage") self.assertEqual(response.status_code, 404) def test_redirect_view_flatpage_special_chars(self): """ A flatpage with special chars in the URL can be served through a view and should add a slash. """ fp = FlatPage.objects.create( url="/some.very_special~chars-here/", title="A very special page", content="Isn't it special!", enable_comments=False, registration_required=False, ) fp.sites.add(settings.SITE_ID) response = self.client.get("/flatpage_root/some.very_special~chars-here") self.assertRedirects( response, "/flatpage_root/some.very_special~chars-here/", status_code=301 )
FlatpageViewAppendSlashTests
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/testing/fixtures/orm.py
{ "start": 700, "end": 811 }
class ____(TestBase): @config.fixture def fixture_session(self): return fixture_session()
ORMTest
python
sympy__sympy
sympy/codegen/ast.py
{ "start": 12961, "end": 14990 }
class ____(CodegenAST): """ Abstract base class for Assignment and AugmentedAssignment. Attributes: =========== op : str Symbol for assignment operator, e.g. "=", "+=", etc. """ def __new__(cls, lhs, rhs): lhs = _sympify(lhs) rhs = _sympify(rhs) cls._check_args(lhs, rhs) return super().__new__(cls, lhs, rhs) @property def lhs(self): return self.args[0] @property def rhs(self): return self.args[1] @classmethod def _check_args(cls, lhs, rhs): """ Check arguments to __new__ and raise exception if any problems found. Derived classes may wish to override this. """ from sympy.matrices.expressions.matexpr import ( MatrixElement, MatrixSymbol) from sympy.tensor.indexed import Indexed from sympy.tensor.array.expressions import ArrayElement # Tuple of things that can be on the lhs of an assignment assignable = (Symbol, MatrixSymbol, MatrixElement, Indexed, Element, Variable, ArrayElement) if not isinstance(lhs, assignable): raise TypeError("Cannot assign to lhs of type %s." % type(lhs)) # Indexed types implement shape, but don't define it until later. This # causes issues in assignment validation. For now, matrices are defined # as anything with a shape that is not an Indexed lhs_is_mat = hasattr(lhs, 'shape') and not isinstance(lhs, Indexed) rhs_is_mat = hasattr(rhs, 'shape') and not isinstance(rhs, Indexed) # If lhs and rhs have same structure, then this assignment is ok if lhs_is_mat: if not rhs_is_mat: raise ValueError("Cannot assign a scalar to a matrix.") elif lhs.shape != rhs.shape: raise ValueError("Dimensions of lhs and rhs do not align.") elif rhs_is_mat and not lhs_is_mat: raise ValueError("Cannot assign a matrix to a scalar.")
AssignmentBase
python
huggingface__transformers
src/transformers/models/bridgetower/modeling_bridgetower.py
{ "start": 16190, "end": 16852 }
class ____(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BridgeTower
BridgeTowerIntermediate
python
pennersr__django-allauth
tests/apps/socialaccount/providers/angellist/tests.py
{ "start": 246, "end": 1068 }
class ____(OAuth2TestsMixin, TestCase): provider_id = AngelListProvider.id def get_mocked_response(self): return MockedResponse( HTTPStatus.OK, """ {"name":"pennersr","id":424732,"bio":"","follower_count":0, "angellist_url":"https://angel.co/dsxtst", "image":"https://angel.co/images/shared/nopic.png", "email":"raymond.penners@example.com","blog_url":null, "online_bio_url":null,"twitter_url":"https://x.com/dsxtst", "facebook_url":null,"linkedin_url":null,"aboutme_url":null, "github_url":null,"dribbble_url":null,"behance_url":null, "what_ive_built":null,"locations":[],"roles":[],"skills":[], "investor":false,"scopes":["message","talent","dealflow","comment", "email"]} """, ) def get_expected_to_str(self): return "raymond.penners@example.com"
AngelListTests
python
ethereum__web3.py
tests/integration/go_ethereum/test_goethereum_http.py
{ "start": 6725, "end": 6810 }
class ____(GoEthereumAsyncTxPoolModuleTest): pass
TestGoEthereumAsyncTxPoolModuleTest
python
HypothesisWorks__hypothesis
hypothesis-python/src/hypothesis/internal/conjecture/datatree.py
{ "start": 13173, "end": 21207 }
class ____: """ A node, or collection of directly descended nodes, in a DataTree. We store the DataTree as a radix tree (https://en.wikipedia.org/wiki/Radix_tree), which means that nodes that are the only child of their parent are collapsed into their parent to save space. Conceptually, you can unfold a single TreeNode storing n values in its lists into a sequence of n nodes, each a child of the last. In other words, (constraints[i], values[i], choice_types[i]) corresponds to the single node at index i. Note that if a TreeNode represents a choice (i.e. the nodes cannot be compacted via the radix tree definition), then its lists will be empty and it will store a `Branch` representing that choce in its `transition`. Examples -------- Consider sequentially drawing a boolean, then an integer. data.draw_boolean() data.draw_integer(1, 3) If we draw True and then 2, the tree may conceptually look like this. ┌──────┐ │ root │ └──┬───┘ ┌──┴───┐ │ True │ └──┬───┘ ┌──┴───┐ │ 2 │ └──────┘ But since 2 is the only child of True, we will compact these nodes and store them as a single TreeNode. ┌──────┐ │ root │ └──┬───┘ ┌────┴──────┐ │ [True, 2] │ └───────────┘ If we then draw True and then 3, True will have multiple children and we can no longer store this compacted representation. We would call split_at(0) on the [True, 2] node to indicate that we need to add a choice at 0-index node (True). ┌──────┐ │ root │ └──┬───┘ ┌──┴───┐ ┌─┤ True ├─┐ │ └──────┘ │ ┌─┴─┐ ┌─┴─┐ │ 2 │ │ 3 │ └───┘ └───┘ """ # The constraints, value, and choice_types of the nodes stored here. These always # have the same length. The values at index i belong to node i. constraints: list[ChoiceConstraintsT] = field(default_factory=list) values: list[ChoiceT] = field(default_factory=list) choice_types: list[ChoiceTypeT] = field(default_factory=list) # The indices of nodes which had forced values. # # Stored as None if no indices have been forced, purely for space saving # reasons (we force quite rarely). 
__forced: set[int] | None = field(default=None, init=False) # What happens next after drawing these nodes. (conceptually, "what is the # child/children of the last node stored here"). # # One of: # - None (we don't know yet) # - Branch (we have seen multiple possible outcomes here) # - Conclusion (ConjectureData.conclude_test was called here) # - Killed (this branch is valid and may even have children, but should not # be explored when generating novel prefixes) transition: None | Branch | Conclusion | Killed = None # A tree node is exhausted if every possible sequence of draws below it has # been explored. We only update this when performing operations that could # change the answer. # # See also TreeNode.check_exhausted. is_exhausted: bool = field(default=False, init=False) @property def forced(self) -> Set[int]: if not self.__forced: return EMPTY return self.__forced def mark_forced(self, i: int) -> None: """ Note that the draw at node i was forced. """ assert 0 <= i < len(self.values) if self.__forced is None: self.__forced = set() self.__forced.add(i) def split_at(self, i: int) -> None: """ Splits the tree so that it can incorporate a decision at the draw call corresponding to the node at position i. Raises FlakyStrategyDefinition if node i was forced. 
""" if i in self.forced: raise FlakyStrategyDefinition(_FLAKY_STRAT_MSG) assert not self.is_exhausted key = self.values[i] child = TreeNode( choice_types=self.choice_types[i + 1 :], constraints=self.constraints[i + 1 :], values=self.values[i + 1 :], transition=self.transition, ) self.transition = Branch( constraints=self.constraints[i], choice_type=self.choice_types[i], children={key: child}, ) if self.__forced is not None: child.__forced = {j - i - 1 for j in self.__forced if j > i} self.__forced = {j for j in self.__forced if j < i} child.check_exhausted() del self.choice_types[i:] del self.values[i:] del self.constraints[i:] assert len(self.values) == len(self.constraints) == len(self.choice_types) == i def check_exhausted(self) -> bool: """ Recalculates is_exhausted if necessary, and then returns it. A node is exhausted if: - Its transition is Conclusion or Killed - It has the maximum number of children (i.e. we have found all of its possible children), and all its children are exhausted Therefore, we only need to compute this for a node when: - We first create it in split_at - We set its transition to either Conclusion or Killed (TreeRecordingObserver.conclude_test or TreeRecordingObserver.kill_branch) - We exhaust any of its children """ if ( # a node cannot go from is_exhausted -> not is_exhausted. not self.is_exhausted # if we don't know what happens after this node, we don't have # enough information to tell if it's exhausted. and self.transition is not None # if there are still any nodes left which are the only child of their # parent (len(self.values) > 0), then this TreeNode must be not # exhausted, unless all of those nodes were forced. # # This is because we maintain an invariant of only adding nodes to # DataTree which have at least 2 possible values, so we know that if # they do not have any siblings that we still have more choices to # discover. 
# # (We actually *do* currently add single-valued nodes to the tree, # but immediately split them into a transition to avoid falsifying # this check. this is a bit of a hack.) and len(self.forced) == len(self.values) ): if isinstance(self.transition, (Conclusion, Killed)): self.is_exhausted = True elif len(self.transition.children) == self.transition.max_children: self.is_exhausted = all( v.is_exhausted for v in self.transition.children.values() ) return self.is_exhausted def _repr_pretty_(self, p: "RepresentationPrinter", cycle: bool) -> None: assert cycle is False indent = 0 for i, (choice_type, constraints, value) in enumerate( zip(self.choice_types, self.constraints, self.values, strict=True) ): with p.indent(indent): if i > 0: p.break_() p.text( _node_pretty( choice_type, value, constraints, forced=i in self.forced ) ) indent += 2 with p.indent(indent): if len(self.values) > 0: p.break_() if self.transition is not None: p.pretty(self.transition) else: p.text("unknown")
TreeNode
python
jazzband__django-oauth-toolkit
oauth2_provider/models.py
{ "start": 1866, "end": 2184 }
class ____(models.CharField): def pre_save(self, model_instance, add): token = getattr(model_instance, "token") checksum = hashlib.sha256(token.encode("utf-8")).hexdigest() setattr(model_instance, self.attname, checksum) return super().pre_save(model_instance, add)
TokenChecksumField
python
getsentry__sentry
src/sentry/incidents/apps.py
{ "start": 36, "end": 244 }
class ____(AppConfig): name = "sentry.incidents" def ready(self) -> None: from . import action_handlers # NOQA from . import events # NOQA from . import receivers # NOQA
Config
python
pytorch__pytorch
test/functorch/test_minifier.py
{ "start": 261, "end": 3563 }
class ____(TestCase): def test_has_mul_minifier(self): def failing_f(x, y): y = y / 3 x = x + 3 x = x * y return x + y inps = [torch.randn(3), torch.randn(3)] failing_f = make_fx(failing_f)(*inps) def has_mul(fx_g, inps): return torch.ops.aten.mul.Tensor in (i.target for i in fx_g.graph.nodes) min_f, inps = minifier(failing_f, inps, has_mul) self.assertEqual(len(min_f.graph.nodes), 4) self.assertEqual(len(inps), 2) def test_has_add_mul(self): def failing_f(x): x = x * 3 x = x + 5 x = x.cos() zero = x - x result = zero / zero result = result + 3 return (result * 2,) inps = [torch.randn(3)] failing_f = make_fx(failing_f)(*inps) def has_nans(fx_g, inps): # Basically, make sure none of the nodes are computing nans for i in inps: if torch.isnan(i).any(): return False return torch.isnan(fx_g(*inps)[0]).any() min_f, inps = minifier(failing_f, inps, has_nans) self.assertEqual(len(min_f.graph.nodes), 3) self.assertEqual(len(inps), 1) def test_input_returned(self): def f(a, b, c): a = a.sin() c = c.cos() d = a * c return (a, b, c, d) inps = [torch.randn(3) for _ in range(3)] def inputs_returned(fx_g, inps): inps = set(get_placeholders(fx_g.graph)) outs = set(get_outputs(fx_g.graph)) return len(inps & outs) > 0 failing_f = make_fx(f)(*inps) min_f, inps = minifier(failing_f, inps, inputs_returned) self.assertEqual(len(min_f.graph.nodes), 2) self.assertEqual(len(inps), 1) def test_tup_use(self): def f(a, b): tup = torch.std_mean(a) return (tup[0] + b * tup[1],) inps = [torch.randn(3), torch.randn(3)] def has_add(fx_g, inps): return torch.ops.aten.add.Tensor in (i.target for i in fx_g.graph.nodes) failing_f = make_fx(f)(*inps) min_f, inps = minifier(failing_f, inps, has_add) self.assertEqual(len(min_f.graph.nodes), 4) self.assertEqual(len(inps), 2) def test_module(self): class MockModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.relu = torch.nn.ReLU() def forward(self, x): y = self.relu(x) zero = y - y result = zero / zero result = result + 3 return 
result mod = MockModule() failing_f = torch.fx.symbolic_trace(mod) inps = [torch.randn(3)] def pass_checker(fx_g, inps): # Basically, make sure none of the inputs are nans for i in inps: if torch.isnan(i).any(): return False return torch.isnan(fx_g(*inps)[0]).any() min_f, inps = minifier(failing_f, inps, pass_checker) assert len(min_f.graph.nodes) == 3 assert len(inps) == 1 if __name__ == "__main__": run_tests()
TestMinifier
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/mysql/types.py
{ "start": 19596, "end": 20788 }
class ____(_StringType): """MySQL MEDIUMTEXT type, for character storage encoded up to 2^24 bytes.""" __visit_name__ = "MEDIUMTEXT" def __init__(self, **kwargs: Any): """Construct a MEDIUMTEXT. :param charset: Optional, a column-level character set for this string value. Takes precedence to 'ascii' or 'unicode' short-hand. :param collation: Optional, a column-level collation for this string value. Takes precedence to 'binary' short-hand. :param ascii: Defaults to False: short-hand for the ``latin1`` character set, generates ASCII in schema. :param unicode: Defaults to False: short-hand for the ``ucs2`` character set, generates UNICODE in schema. :param national: Optional. If true, use the server's configured national character set. :param binary: Defaults to False: short-hand, pick the binary collation type that matches the column's character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data. """ super().__init__(**kwargs)
MEDIUMTEXT
python
PrefectHQ__prefect
src/prefect/client/schemas/sorting.py
{ "start": 1182, "end": 1384 }
class ____(AutoEnum): """Defines flow sorting options.""" CREATED_DESC = AutoEnum.auto() UPDATED_DESC = AutoEnum.auto() NAME_ASC = AutoEnum.auto() NAME_DESC = AutoEnum.auto()
FlowSort
python
tensorflow__tensorflow
tensorflow/tools/ci_build/osx/arm64/tensorflow_metal_plugin_test.py
{ "start": 153464, "end": 156970 }
class ____(test.TestCase): shapes = [[1, 5], [2, 6, 5], [5, 3, 6, 2], [100, 100]] seeds = [[2, 1], [16, 12], [1582, 10230], [12, 23101]] key = [[2], [16], [1582], [12]] counters = [[23, 11], [11, 23], [2000312, 0], [0, 0]] itypes = [dtypes.int32, dtypes.uint32, dtypes.int64, dtypes.uint64] dtypes = [dtypes.float32, dtypes.float32, dtypes.half, dtypes.half] def _testStatelessRandomDefault(self, rnfunc, shape, seed, dtype): with test_util.device(use_gpu=False): ref = rnfunc(shape=shape, seed=seed, dtype=dtype) with test_util.device(use_gpu=True): result = rnfunc(shape=shape, seed=seed, dtype=dtype) if dtype == dtypes.float32: self.assertAllClose(result, ref, atol=1e-5) elif dtype == dtypes.float16: self.assertAllClose(result, ref, atol=1e-3) def _testStatelessRandomDefaultV2( self, rnfunc, shape, key, counter, dtype, alg=1 ): with test_util.device(use_gpu=False): ref = rnfunc(shape=shape, key=[key[0]], alg=alg, counter=counter) with test_util.device(use_gpu=True): result = rnfunc(shape=shape, key=[key[0]], alg=alg, counter=counter) self.assertAllClose(result, ref, atol=1e-5) def _testStatelessRandomUniformFullIntV2( self, rnfunc, shape, key, counter, dtype ): with test_util.device(use_gpu=False): ref = rnfunc(shape=shape, alg=1, key=key, counter=counter, dtype=dtype) with test_util.device(use_gpu=True): result = rnfunc(shape=shape, alg=1, key=key, counter=counter, dtype=dtype) self.assertEqual(result.shape, ref.shape) def testRandomUniformCorrectness_1(self): for i in range(len(self.shapes)): self._testStatelessRandomDefault( raw_ops.StatelessRandomUniform, self.shapes[i], self.seeds[i], self.dtypes[i], ) def testRandomUniformV2Correctness_1(self): for i in range(len(self.shapes)): self._testStatelessRandomDefaultV2( raw_ops.StatelessRandomUniformV2, self.shapes[i], self.seeds[i], self.counters[i], self.dtypes[i], ) def testRandomNormalCorrectness_1(self): for i in range(len(self.shapes)): self._testStatelessRandomDefault( raw_ops.StatelessRandomNormal, 
self.shapes[i], self.seeds[i], self.dtypes[i], ) def testRandomNormalV2Correctness_1(self): for i in range(len(self.shapes)): self._testStatelessRandomDefaultV2( raw_ops.StatelessRandomNormalV2, self.shapes[i], self.seeds[i], self.counters[i], self.dtypes[i], ) def testTruncatedNormalCorrectness_1(self): for i in range(len(self.shapes)): self._testStatelessRandomDefault( raw_ops.StatelessTruncatedNormal, self.shapes[i], self.seeds[i], self.dtypes[i], ) def testTruncatedNormalV2Correctness_1(self): for i in range(len(self.shapes)): self._testStatelessRandomDefaultV2( raw_ops.StatelessTruncatedNormalV2, self.shapes[i], self.seeds[i], self.counters[i], self.dtypes[i], ) def testRandomUniformFullIntV2Functional_1(self): for i in range(len(self.shapes)): self._testStatelessRandomUniformFullIntV2( raw_ops.StatelessRandomUniformFullIntV2, self.shapes[i], self.key[i], self.counters[i], self.itypes[i], )
StatelessRandomOpsCorrectnessTestV2
python
networkx__networkx
networkx/generators/tests/test_stochastic.py
{ "start": 111, "end": 2179 }
class ____: """Unit tests for the :func:`~networkx.stochastic_graph` function.""" def test_default_weights(self): G = nx.DiGraph() G.add_edge(0, 1) G.add_edge(0, 2) S = nx.stochastic_graph(G) assert nx.is_isomorphic(G, S) assert sorted(S.edges(data=True)) == [ (0, 1, {"weight": 0.5}), (0, 2, {"weight": 0.5}), ] def test_in_place(self): """Tests for an in-place reweighting of the edges of the graph.""" G = nx.DiGraph() G.add_edge(0, 1, weight=1) G.add_edge(0, 2, weight=1) nx.stochastic_graph(G, copy=False) assert sorted(G.edges(data=True)) == [ (0, 1, {"weight": 0.5}), (0, 2, {"weight": 0.5}), ] def test_arbitrary_weights(self): G = nx.DiGraph() G.add_edge(0, 1, weight=1) G.add_edge(0, 2, weight=1) S = nx.stochastic_graph(G) assert sorted(S.edges(data=True)) == [ (0, 1, {"weight": 0.5}), (0, 2, {"weight": 0.5}), ] def test_multidigraph(self): G = nx.MultiDiGraph() G.add_edges_from([(0, 1), (0, 1), (0, 2), (0, 2)]) S = nx.stochastic_graph(G) d = {"weight": 0.25} assert sorted(S.edges(data=True)) == [ (0, 1, d), (0, 1, d), (0, 2, d), (0, 2, d), ] def test_zero_weights(self): """Smoke test: ensure ZeroDivisionError is not raised.""" G = nx.DiGraph() G.add_edge(0, 1, weight=0) G.add_edge(0, 2, weight=0) S = nx.stochastic_graph(G) assert sorted(S.edges(data=True)) == [ (0, 1, {"weight": 0}), (0, 2, {"weight": 0}), ] def test_graph_disallowed(self): with pytest.raises(nx.NetworkXNotImplemented): nx.stochastic_graph(nx.Graph()) def test_multigraph_disallowed(self): with pytest.raises(nx.NetworkXNotImplemented): nx.stochastic_graph(nx.MultiGraph())
TestStochasticGraph
python
python__mypy
mypy/types.py
{ "start": 47901, "end": 49121 }
class ____(ProperType): """The type of 'None'. This type can be written by users as 'None'. """ __slots__ = () def __init__(self, line: int = -1, column: int = -1) -> None: super().__init__(line, column) def can_be_true_default(self) -> bool: return False def __hash__(self) -> int: return hash(NoneType) def __eq__(self, other: object) -> bool: return isinstance(other, NoneType) def accept(self, visitor: TypeVisitor[T]) -> T: return visitor.visit_none_type(self) def serialize(self) -> JsonDict: return {".class": "NoneType"} @classmethod def deserialize(cls, data: JsonDict) -> NoneType: assert data[".class"] == "NoneType" return NoneType() def write(self, data: WriteBuffer) -> None: write_tag(data, NONE_TYPE) write_tag(data, END_TAG) @classmethod def read(cls, data: ReadBuffer) -> NoneType: assert read_tag(data) == END_TAG return NoneType() def is_singleton_type(self) -> bool: return True # NoneType used to be called NoneTyp so to avoid needlessly breaking # external plugins we keep that alias here. NoneTyp = NoneType
NoneType
python
Farama-Foundation__Gymnasium
gymnasium/wrappers/stateful_observation.py
{ "start": 12254, "end": 18931 }
class ____( gym.Wrapper[WrapperObsType, ActType, ObsType, ActType], gym.utils.RecordConstructorArgs, ): """Stacks the observations from the last ``N`` time steps in a rolling manner. For example, if the number of stacks is 4, then the returned observation contains the most recent 4 observations. For environment 'Pendulum-v1', the original observation is an array with shape [3], so if we stack 4 observations, the processed observation has shape [4, 3]. Users have options for the padded observation used: * "reset" (default) - The reset value is repeated * "zero" - A "zero"-like instance of the observation space * custom - An instance of the observation space No vector version of the wrapper exists. Example: >>> import gymnasium as gym >>> from gymnasium.wrappers import FrameStackObservation >>> env = gym.make("CarRacing-v3") >>> env = FrameStackObservation(env, stack_size=4) >>> env.observation_space Box(0, 255, (4, 96, 96, 3), uint8) >>> obs, _ = env.reset() >>> obs.shape (4, 96, 96, 3) Example with different padding observations: >>> env = gym.make("CartPole-v1") >>> env.reset(seed=123) (array([ 0.01823519, -0.0446179 , -0.02796401, -0.03156282], dtype=float32), {}) >>> stacked_env = FrameStackObservation(env, 3) # the default is padding_type="reset" >>> stacked_env.reset(seed=123) (array([[ 0.01823519, -0.0446179 , -0.02796401, -0.03156282], [ 0.01823519, -0.0446179 , -0.02796401, -0.03156282], [ 0.01823519, -0.0446179 , -0.02796401, -0.03156282]], dtype=float32), {}) >>> stacked_env = FrameStackObservation(env, 3, padding_type="zero") >>> stacked_env.reset(seed=123) (array([[ 0. , 0. , 0. , 0. ], [ 0. , 0. , 0. , 0. ], [ 0.01823519, -0.0446179 , -0.02796401, -0.03156282]], dtype=float32), {}) >>> stacked_env = FrameStackObservation(env, 3, padding_type=np.array([1, -1, 0, 2], dtype=np.float32)) >>> stacked_env.reset(seed=123) (array([[ 1. , -1. , 0. , 2. ], [ 1. , -1. , 0. , 2. 
], [ 0.01823519, -0.0446179 , -0.02796401, -0.03156282]], dtype=float32), {}) Change logs: * v0.15.0 - Initially add as ``FrameStack`` with support for lz4 * v1.0.0 - Rename to ``FrameStackObservation`` and remove lz4 and ``LazyFrame`` support along with adding the ``padding_type`` parameter """ def __init__( self, env: gym.Env[ObsType, ActType], stack_size: int, *, padding_type: str | ObsType = "reset", ): """Observation wrapper that stacks the observations in a rolling manner. Args: env: The environment to apply the wrapper stack_size: The number of frames to stack. padding_type: The padding type to use when stacking the observations, options: "reset", "zero", custom obs """ gym.utils.RecordConstructorArgs.__init__( self, stack_size=stack_size, padding_type=padding_type ) gym.Wrapper.__init__(self, env) if not np.issubdtype(type(stack_size), np.integer): raise TypeError( f"The stack_size is expected to be an integer, actual type: {type(stack_size)}" ) if not 0 < stack_size: raise ValueError( f"The stack_size needs to be greater than zero, actual value: {stack_size}" ) if isinstance(padding_type, str) and ( padding_type == "reset" or padding_type == "zero" ): self.padding_value: ObsType = create_zero_array(env.observation_space) elif padding_type in env.observation_space: self.padding_value = padding_type padding_type = "_custom" else: if isinstance(padding_type, str): raise ValueError( # we are guessing that the user just entered the "reset" or "zero" wrong f"Unexpected `padding_type`, expected 'reset', 'zero' or a custom observation space, actual value: {padding_type!r}" ) else: raise ValueError( f"Unexpected `padding_type`, expected 'reset', 'zero' or a custom observation space, actual value: {padding_type!r} not an instance of env observation ({env.observation_space})" ) self.observation_space = batch_space(env.observation_space, n=stack_size) self.stack_size: Final[int] = stack_size self.padding_type: Final[str] = padding_type self.obs_queue = deque( 
[self.padding_value for _ in range(self.stack_size)], maxlen=self.stack_size ) self.stacked_obs = create_empty_array(env.observation_space, n=self.stack_size) def step( self, action: WrapperActType ) -> tuple[WrapperObsType, SupportsFloat, bool, bool, dict[str, Any]]: """Steps through the environment, appending the observation to the frame buffer. Args: action: The action to step through the environment with Returns: Stacked observations, reward, terminated, truncated, and info from the environment """ obs, reward, terminated, truncated, info = self.env.step(action) self.obs_queue.append(obs) updated_obs = deepcopy( concatenate(self.env.observation_space, self.obs_queue, self.stacked_obs) ) return updated_obs, reward, terminated, truncated, info def reset( self, *, seed: int | None = None, options: dict[str, Any] | None = None ) -> tuple[WrapperObsType, dict[str, Any]]: """Reset the environment, returning the stacked observation and info. Args: seed: The environment seed options: The reset options Returns: The stacked observations and info """ obs, info = self.env.reset(seed=seed, options=options) if self.padding_type == "reset": self.padding_value = obs for _ in range(self.stack_size - 1): self.obs_queue.append(self.padding_value) self.obs_queue.append(obs) updated_obs = deepcopy( concatenate(self.env.observation_space, self.obs_queue, self.stacked_obs) ) return updated_obs, info
FrameStackObservation
python
pypa__hatch
tests/cli/publish/test_publish.py
{ "start": 21774, "end": 23542 }
class ____: @pytest.mark.parametrize("field", ["name", "version"]) def test_missing_required_metadata_field(self, hatch, temp_dir_cache, helpers, published_project_name, field): with temp_dir_cache.as_cwd(): result = hatch("new", published_project_name) assert result.exit_code == 0, result.output path = temp_dir_cache / published_project_name with path.as_cwd(): current_version = timestamp_to_version(helpers.get_current_timestamp()) result = hatch("version", current_version) assert result.exit_code == 0, result.output result = hatch("build", "-t", "sdist") assert result.exit_code == 0, result.output build_directory = path / "dist" artifacts = list(build_directory.iterdir()) artifact_path = str(artifacts[0]) extraction_directory = path / "extraction" with tarfile.open(artifact_path, "r:gz") as tar_archive: tar_archive.extractall(extraction_directory, **helpers.tarfile_extraction_compat_options()) metadata_file_path = extraction_directory / f"{published_project_name}-{current_version}" / "PKG-INFO" metadata_file_path.write_text(remove_metadata_field(field, metadata_file_path.read_text())) with tarfile.open(artifact_path, "w:gz") as tar_archive: tar_archive.add(extraction_directory, arcname="") with path.as_cwd(): result = hatch("publish", "--user", "foo", "--auth", "bar") assert result.exit_code == 1, result.output assert result.output == helpers.dedent( f""" Missing required field `{field}` in artifact: {artifact_path} """ )
TestSourceDistribution
python
run-llama__llama_index
llama-index-integrations/postprocessor/llama-index-postprocessor-flashrank-rerank/llama_index/postprocessor/flashrank_rerank/base.py
{ "start": 981, "end": 3277 }
class ____(BaseNodePostprocessor): model: str = Field( description="FlashRank model name.", default="ms-marco-TinyBERT-L-2-v2" ) top_n: int = Field( description="Number of nodes to return sorted by score.", default=20 ) max_length: int = Field( description="Maximum length of passage text passed to the reranker.", default=512, ) _reranker: Ranker = PrivateAttr() @override def model_post_init(self, context: Any, /) -> None: # pyright: ignore[reportAny] self._reranker = Ranker(model_name=self.model, max_length=self.max_length) @classmethod @override def class_name(cls) -> str: return "FlashRankRerank" @dispatcher.span @override def _postprocess_nodes( self, nodes: list[NodeWithScore], query_bundle: QueryBundle | None = None, ) -> list[NodeWithScore]: if query_bundle is None: raise ValueError("Missing query bundle in extra info.") if len(nodes) == 0: return [] query_and_nodes: RerankRequest = RerankRequest( query=query_bundle.query_str, passages=[ { "id": node.node.id_, "text": node.node.get_content(metadata_mode=MetadataMode.EMBED), } for node in nodes ], ) ## you would need to define a custom event subclassing BaseEvent from llama_index_instrumentation dispatcher.event( FlashRerankingQueryEvent( nodes=nodes, model_name=self.model, query_str=query_bundle.query_str, top_k=self.top_n, ) ) scores = self._reranker.rerank(query_and_nodes) scores_by_id = {score["id"]: score["score"] for score in scores} if len(scores) != len(nodes): msg = "Number of scores and nodes do not match." raise ValueError(msg) for node in nodes: node.score = scores_by_id[node.node.id_] new_nodes = sorted(nodes, key=lambda x: -x.score if x.score else 0)[ : self.top_n ] dispatcher.event(FlashRerankEndEvent(nodes=new_nodes)) return new_nodes
FlashRankRerank
python
ray-project__ray
python/ray/data/_internal/logical/operators/map_operator.py
{ "start": 9789, "end": 12089 }
class ____(AbstractUDFMap): """Logical operator for filter.""" def __init__( self, input_op: LogicalOperator, predicate_expr: Optional[Expr] = None, fn: Optional[UserDefinedFunction] = None, fn_args: Optional[Iterable[Any]] = None, fn_kwargs: Optional[Dict[str, Any]] = None, fn_constructor_args: Optional[Iterable[Any]] = None, fn_constructor_kwargs: Optional[Dict[str, Any]] = None, compute: Optional[ComputeStrategy] = None, ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, ray_remote_args: Optional[Dict[str, Any]] = None, ): # Ensure exactly one of fn, or predicate_expr is provided provided_params = sum([fn is not None, predicate_expr is not None]) if provided_params != 1: raise ValueError( f"Exactly one of 'fn', or 'predicate_expr' must be provided (received fn={fn}, predicate_expr={predicate_expr})" ) self._predicate_expr = predicate_expr super().__init__( "Filter", input_op, fn=fn, fn_args=fn_args, fn_kwargs=fn_kwargs, fn_constructor_args=fn_constructor_args, fn_constructor_kwargs=fn_constructor_kwargs, compute=compute, ray_remote_args_fn=ray_remote_args_fn, ray_remote_args=ray_remote_args, ) def can_modify_num_rows(self) -> bool: return True def is_expression_based(self) -> bool: return self._predicate_expr is not None def _get_operator_name(self, op_name: str, fn: UserDefinedFunction): if self.is_expression_based(): # Get a concise inline string representation of the expression from ray.data._internal.planner.plan_expression.expression_visitors import ( _InlineExprReprVisitor, ) expr_str = _InlineExprReprVisitor().visit(self._predicate_expr) # Truncate only the final result if too long max_length = 60 if len(expr_str) > max_length: expr_str = expr_str[: max_length - 3] + "..." return f"{op_name}({expr_str})" return super()._get_operator_name(op_name, fn)
Filter
python
django__django
tests/check_framework/urls/bad_class_based_error_handlers.py
{ "start": 19, "end": 277 }
class ____: @classmethod def as_view(cls): def view(): pass return view handler400 = HandlerView.as_view() handler403 = HandlerView.as_view() handler404 = HandlerView.as_view() handler500 = HandlerView.as_view()
HandlerView
python
django__django
django/templatetags/tz.py
{ "start": 315, "end": 2129 }
class ____(datetime): pass # Template filters @register.filter def localtime(value): """ Convert a datetime to local time in the active time zone. This only makes sense within a {% localtime off %} block. """ return do_timezone(value, timezone.get_current_timezone()) @register.filter def utc(value): """ Convert a datetime to UTC. """ return do_timezone(value, UTC) @register.filter("timezone") def do_timezone(value, arg): """ Convert a datetime to local time in a given time zone. The argument must be an instance of a tzinfo subclass or a time zone name. Naive datetimes are assumed to be in local time in the default time zone. """ if not isinstance(value, datetime): return "" # Obtain a timezone-aware datetime try: if timezone.is_naive(value): default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) # Filters must never raise exceptionsm, so catch everything. except Exception: return "" # Obtain a tzinfo instance if isinstance(arg, tzinfo): tz = arg elif isinstance(arg, str): try: tz = zoneinfo.ZoneInfo(arg) except zoneinfo.ZoneInfoNotFoundError: return "" else: return "" result = timezone.localtime(value, tz) # HACK: the convert_to_local_time flag will prevent # automatic conversion of the value to local time. result = datetimeobject( result.year, result.month, result.day, result.hour, result.minute, result.second, result.microsecond, result.tzinfo, ) result.convert_to_local_time = False return result # Template tags
datetimeobject
python
sphinx-doc__sphinx
sphinx/ext/autosummary/generate.py
{ "start": 2883, "end": 3192 }
class ____(NamedTuple): name: str path: str | None template: str recursive: bool def _underline(title: str, line: str = '=') -> str: if '\n' in title: msg = 'Can only underline single lines' raise ValueError(msg) return title + '\n' + line * len(title)
AutosummaryEntry
python
streamlit__streamlit
lib/tests/streamlit/runtime/secrets_test.py
{ "start": 15072, "end": 16567 }
class ____(unittest.TestCase): # The number of threads to run our tests on NUM_THREADS = 50 def setUp(self) -> None: # st.secrets modifies os.environ, so we save it here and # restore in tearDown. self._prev_environ = dict(os.environ) self.secrets = Secrets() def tearDown(self) -> None: os.environ.clear() os.environ.update(self._prev_environ) @patch("streamlit.watcher.path_watcher.watch_file", MagicMock()) @patch("builtins.open", new_callable=mock_open, read_data=MOCK_TOML) def test_access_secrets(self, _): """Accessing secrets is thread-safe.""" def access_secrets(_: int) -> None: assert self.secrets["db_username"] == "Jane" assert self.secrets["subsection"]["email"] == "eng@streamlit.io" assert self.secrets["subsection"].email == "eng@streamlit.io" call_on_threads(access_secrets, num_threads=self.NUM_THREADS) @patch("streamlit.watcher.path_watcher.watch_file", MagicMock()) @patch("builtins.open", new_callable=mock_open, read_data=MOCK_TOML) def test_reload_secrets(self, _): """Re-parsing the secrets file is thread-safe.""" def reload_secrets(_: int) -> None: # Reset secrets, and then access a secret to reparse. self.secrets._reset() assert self.secrets["db_username"] == "Jane" call_on_threads(reload_secrets, num_threads=self.NUM_THREADS)
SecretsThreadingTests
python
django__django
tests/timezones/forms.py
{ "start": 54, "end": 116 }
class ____(forms.Form): dt = forms.DateTimeField()
EventForm
python
pypa__hatch
backend/src/hatchling/metadata/core.py
{ "start": 12321, "end": 52679 }
class ____: """ https://peps.python.org/pep-0621/ """ def __init__( self, root: str, config: dict[str, Any], hatch_metadata: HatchMetadataSettings, context: Context, ) -> None: self.root = root self.config = config self.hatch_metadata = hatch_metadata self.context = context self._raw_name: str | None = None self._name: str | None = None self._version: str | None = None self._description: str | None = None self._readme: str | None = None self._readme_content_type: str | None = None self._readme_path: str | None = None self._requires_python: str | None = None self._python_constraint: SpecifierSet | None = None self._license: str | None = None self._license_expression: str | None = None self._license_files: list[str] | None = None self._authors: list[str] | None = None self._authors_data: dict[str, list[str]] | None = None self._maintainers: list[str] | None = None self._maintainers_data: dict[str, list[str]] | None = None self._keywords: list[str] | None = None self._classifiers: list[str] | None = None self._extra_classifiers: set[str] = set() self._urls: dict[str, str] | None = None self._scripts: dict[str, str] | None = None self._gui_scripts: dict[str, str] | None = None self._entry_points: dict[str, dict[str, str]] | None = None self._dependencies_complex: dict[str, Requirement] | None = None self._dependencies: list[str] | None = None self._optional_dependencies_complex: dict[str, dict[str, Requirement]] | None = None self._optional_dependencies: dict[str, list[str]] | None = None self._dynamic: list[str] | None = None # Indicates that the version has been successfully set dynamically self._version_set: bool = False @property def raw_name(self) -> str: """ https://peps.python.org/pep-0621/#name """ if self._raw_name is None: if "name" in self.dynamic: message = "Static metadata field `name` cannot be present in field `project.dynamic`" raise ValueError(message) raw_name = self.config.get("name", "") if not raw_name: message = "Missing required field 
`project.name`" raise ValueError(message) if not isinstance(raw_name, str): message = "Field `project.name` must be a string" raise TypeError(message) if not is_valid_project_name(raw_name): message = ( "Required field `project.name` must only contain ASCII letters/digits, underscores, " "hyphens, and periods, and must begin and end with ASCII letters/digits." ) raise ValueError(message) self._raw_name = raw_name return self._raw_name @property def name(self) -> str: """ https://peps.python.org/pep-0621/#name """ if self._name is None: self._name = normalize_project_name(self.raw_name) return self._name @property def version(self) -> str: """ https://peps.python.org/pep-0621/#version """ version: str if self._version is None: if "version" not in self.config: if not self._version_set and "version" not in self.dynamic: message = ( "Field `project.version` can only be resolved dynamically " "if `version` is in field `project.dynamic`" ) raise ValueError(message) else: if "version" in self.dynamic: message = ( "Metadata field `version` cannot be both statically defined and " "listed in field `project.dynamic`" ) raise ValueError(message) version = self.config["version"] if not isinstance(version, str): message = "Field `project.version` must be a string" raise TypeError(message) self._version = version return cast(str, self._version) @property def description(self) -> str: """ https://peps.python.org/pep-0621/#description """ if self._description is None: if "description" in self.config: description = self.config["description"] if "description" in self.dynamic: message = ( "Metadata field `description` cannot be both statically defined and " "listed in field `project.dynamic`" ) raise ValueError(message) else: description = "" if not isinstance(description, str): message = "Field `project.description` must be a string" raise TypeError(message) self._description = " ".join(description.splitlines()) return self._description @property def readme(self) -> str: """ 
https://peps.python.org/pep-0621/#readme """ readme: str | dict[str, str] | None content_type: str | None if self._readme is None: if "readme" in self.config: readme = self.config["readme"] if "readme" in self.dynamic: message = ( "Metadata field `readme` cannot be both statically defined and " "listed in field `project.dynamic`" ) raise ValueError(message) else: readme = None if readme is None: self._readme = "" self._readme_content_type = "text/markdown" self._readme_path = "" elif isinstance(readme, str): normalized_path = readme.lower() if normalized_path.endswith(".md"): content_type = "text/markdown" elif normalized_path.endswith(".rst"): content_type = "text/x-rst" elif normalized_path.endswith(".txt"): content_type = "text/plain" else: message = f"Unable to determine the content-type based on the extension of readme file: {readme}" raise TypeError(message) readme_path = os.path.normpath(os.path.join(self.root, readme)) if not os.path.isfile(readme_path): message = f"Readme file does not exist: {readme}" raise OSError(message) with open(readme_path, encoding="utf-8") as f: self._readme = f.read() self._readme_content_type = content_type self._readme_path = readme elif isinstance(readme, dict): content_type = readme.get("content-type") if content_type is None: message = "Field `content-type` is required in the `project.readme` table" raise ValueError(message) if not isinstance(content_type, str): message = "Field `content-type` in the `project.readme` table must be a string" raise TypeError(message) if content_type not in {"text/markdown", "text/x-rst", "text/plain"}: message = ( "Field `content-type` in the `project.readme` table must be one of the following: " "text/markdown, text/x-rst, text/plain" ) raise ValueError(message) if "file" in readme and "text" in readme: message = "Cannot specify both `file` and `text` in the `project.readme` table" raise ValueError(message) if "file" in readme: relative_path = readme["file"] if not isinstance(relative_path, 
str): message = "Field `file` in the `project.readme` table must be a string" raise TypeError(message) path = os.path.normpath(os.path.join(self.root, relative_path)) if not os.path.isfile(path): message = f"Readme file does not exist: {relative_path}" raise OSError(message) with open(path, encoding=readme.get("charset", "utf-8")) as f: contents = f.read() readme_path = relative_path elif "text" in readme: contents = readme["text"] if not isinstance(contents, str): message = "Field `text` in the `project.readme` table must be a string" raise TypeError(message) readme_path = "" else: message = "Must specify either `file` or `text` in the `project.readme` table" raise ValueError(message) self._readme = contents self._readme_content_type = content_type self._readme_path = readme_path else: message = "Field `project.readme` must be a string or a table" raise TypeError(message) return self._readme @property def readme_content_type(self) -> str: """ https://peps.python.org/pep-0621/#readme """ if self._readme_content_type is None: _ = self.readme return cast(str, self._readme_content_type) @property def readme_path(self) -> str: """ https://peps.python.org/pep-0621/#readme """ if self._readme_path is None: _ = self.readme return cast(str, self._readme_path) @property def requires_python(self) -> str: """ https://peps.python.org/pep-0621/#requires-python """ if self._requires_python is None: from packaging.specifiers import InvalidSpecifier, SpecifierSet if "requires-python" in self.config: requires_python = self.config["requires-python"] if "requires-python" in self.dynamic: message = ( "Metadata field `requires-python` cannot be both statically defined and " "listed in field `project.dynamic`" ) raise ValueError(message) else: requires_python = "" if not isinstance(requires_python, str): message = "Field `project.requires-python` must be a string" raise TypeError(message) try: self._python_constraint = SpecifierSet(requires_python) except InvalidSpecifier as e: message 
= f"Field `project.requires-python` is invalid: {e}" raise ValueError(message) from None self._requires_python = str(self._python_constraint) return self._requires_python @property def python_constraint(self) -> SpecifierSet: from packaging.specifiers import SpecifierSet if self._python_constraint is None: _ = self.requires_python return cast(SpecifierSet, self._python_constraint) @property def license(self) -> str: """ https://peps.python.org/pep-0621/#license """ if self._license is None: if "license" in self.config: data = self.config["license"] if "license" in self.dynamic: message = ( "Metadata field `license` cannot be both statically defined and " "listed in field `project.dynamic`" ) raise ValueError(message) else: data = None if data is None: self._license = "" self._license_expression = "" elif isinstance(data, str): from packaging.licenses import canonicalize_license_expression try: self._license_expression = str(canonicalize_license_expression(data)) except ValueError as e: message = f"Error parsing field `project.license` - {e}" raise ValueError(message) from None self._license = "" elif isinstance(data, dict): if "file" in data and "text" in data: message = "Cannot specify both `file` and `text` in the `project.license` table" raise ValueError(message) if "file" in data: relative_path = data["file"] if not isinstance(relative_path, str): message = "Field `file` in the `project.license` table must be a string" raise TypeError(message) path = os.path.normpath(os.path.join(self.root, relative_path)) if not os.path.isfile(path): message = f"License file does not exist: {relative_path}" raise OSError(message) with open(path, encoding="utf-8") as f: contents = f.read() elif "text" in data: contents = data["text"] if not isinstance(contents, str): message = "Field `text` in the `project.license` table must be a string" raise TypeError(message) else: message = "Must specify either `file` or `text` in the `project.license` table" raise ValueError(message) 
self._license = contents self._license_expression = "" else: message = "Field `project.license` must be a string or a table" raise TypeError(message) return self._license @property def license_expression(self) -> str: """ https://peps.python.org/pep-0639/ """ if self._license_expression is None: _ = self.license return cast(str, self._license_expression) @property def license_files(self) -> list[str]: """ https://peps.python.org/pep-0639/ """ if self._license_files is None: if "license-files" in self.config: globs = self.config["license-files"] if "license-files" in self.dynamic: message = ( "Metadata field `license-files` cannot be both statically defined and " "listed in field `project.dynamic`" ) raise ValueError(message) if isinstance(globs, dict): globs = globs.get("globs", globs.get("paths", [])) else: globs = ["LICEN[CS]E*", "COPYING*", "NOTICE*", "AUTHORS*"] from glob import glob license_files: list[str] = [] if not isinstance(globs, list): message = "Field `project.license-files` must be an array" raise TypeError(message) for i, pattern in enumerate(globs, 1): if not isinstance(pattern, str): message = f"Entry #{i} of field `project.license-files` must be a string" raise TypeError(message) full_pattern = os.path.normpath(os.path.join(self.root, pattern)) license_files.extend( os.path.relpath(path, self.root).replace("\\", "/") for path in glob(full_pattern) if os.path.isfile(path) ) self._license_files = sorted(license_files) return self._license_files @property def authors(self) -> list[str]: """ https://peps.python.org/pep-0621/#authors-maintainers """ authors: list[str] authors_data: dict[str, list[str]] if self._authors is None: if "authors" in self.config: authors = self.config["authors"] if "authors" in self.dynamic: message = ( "Metadata field `authors` cannot be both statically defined and " "listed in field `project.dynamic`" ) raise ValueError(message) else: authors = [] if not isinstance(authors, list): message = "Field `project.authors` must be 
an array" raise TypeError(message) from email.headerregistry import Address authors = deepcopy(authors) authors_data = {"name": [], "email": []} for i, data in enumerate(authors, 1): if not isinstance(data, dict): message = f"Author #{i} of field `project.authors` must be an inline table" raise TypeError(message) name = data.get("name", "") if not isinstance(name, str): message = f"Name of author #{i} of field `project.authors` must be a string" raise TypeError(message) email = data.get("email", "") if not isinstance(email, str): message = f"Email of author #{i} of field `project.authors` must be a string" raise TypeError(message) if name and email: authors_data["email"].append(str(Address(display_name=name, addr_spec=email))) elif email: authors_data["email"].append(str(Address(addr_spec=email))) elif name: authors_data["name"].append(name) else: message = f"Author #{i} of field `project.authors` must specify either `name` or `email`" raise ValueError(message) self._authors = authors self._authors_data = authors_data return self._authors @property def authors_data(self) -> dict[str, list[str]]: """ https://peps.python.org/pep-0621/#authors-maintainers """ if self._authors_data is None: _ = self.authors return cast(dict, self._authors_data) @property def maintainers(self) -> list[str]: """ https://peps.python.org/pep-0621/#authors-maintainers """ maintainers: list[str] if self._maintainers is None: if "maintainers" in self.config: maintainers = self.config["maintainers"] if "maintainers" in self.dynamic: message = ( "Metadata field `maintainers` cannot be both statically defined and " "listed in field `project.dynamic`" ) raise ValueError(message) else: maintainers = [] if not isinstance(maintainers, list): message = "Field `project.maintainers` must be an array" raise TypeError(message) from email.headerregistry import Address maintainers = deepcopy(maintainers) maintainers_data: dict[str, list[str]] = {"name": [], "email": []} for i, data in 
enumerate(maintainers, 1): if not isinstance(data, dict): message = f"Maintainer #{i} of field `project.maintainers` must be an inline table" raise TypeError(message) name = data.get("name", "") if not isinstance(name, str): message = f"Name of maintainer #{i} of field `project.maintainers` must be a string" raise TypeError(message) email = data.get("email", "") if not isinstance(email, str): message = f"Email of maintainer #{i} of field `project.maintainers` must be a string" raise TypeError(message) if name and email: maintainers_data["email"].append(str(Address(display_name=name, addr_spec=email))) elif email: maintainers_data["email"].append(str(Address(addr_spec=email))) elif name: maintainers_data["name"].append(name) else: message = f"Maintainer #{i} of field `project.maintainers` must specify either `name` or `email`" raise ValueError(message) self._maintainers = maintainers self._maintainers_data = maintainers_data return self._maintainers @property def maintainers_data(self) -> dict[str, list[str]]: """ https://peps.python.org/pep-0621/#authors-maintainers """ if self._maintainers_data is None: _ = self.maintainers return cast(dict, self._maintainers_data) @property def keywords(self) -> list[str]: """ https://peps.python.org/pep-0621/#keywords """ if self._keywords is None: if "keywords" in self.config: keywords = self.config["keywords"] if "keywords" in self.dynamic: message = ( "Metadata field `keywords` cannot be both statically defined and " "listed in field `project.dynamic`" ) raise ValueError(message) else: keywords = [] if not isinstance(keywords, list): message = "Field `project.keywords` must be an array" raise TypeError(message) unique_keywords = set() for i, keyword in enumerate(keywords, 1): if not isinstance(keyword, str): message = f"Keyword #{i} of field `project.keywords` must be a string" raise TypeError(message) unique_keywords.add(keyword) self._keywords = sorted(unique_keywords) return self._keywords @property def classifiers(self) 
-> list[str]: """ https://peps.python.org/pep-0621/#classifiers """ if self._classifiers is None: import bisect if "classifiers" in self.config: classifiers = self.config["classifiers"] if "classifiers" in self.dynamic: message = ( "Metadata field `classifiers` cannot be both statically defined and " "listed in field `project.dynamic`" ) raise ValueError(message) else: classifiers = [] if not isinstance(classifiers, list): message = "Field `project.classifiers` must be an array" raise TypeError(message) verify_classifiers = not os.environ.get("HATCH_METADATA_CLASSIFIERS_NO_VERIFY") if verify_classifiers: import trove_classifiers known_classifiers = trove_classifiers.classifiers | self._extra_classifiers sorted_classifiers = list(trove_classifiers.sorted_classifiers) for classifier in sorted(self._extra_classifiers - trove_classifiers.classifiers): bisect.insort(sorted_classifiers, classifier) unique_classifiers = set() for i, classifier in enumerate(classifiers, 1): if not isinstance(classifier, str): message = f"Classifier #{i} of field `project.classifiers` must be a string" raise TypeError(message) if ( not self.__classifier_is_private(classifier) and verify_classifiers and classifier not in known_classifiers ): message = f"Unknown classifier in field `project.classifiers`: {classifier}" raise ValueError(message) unique_classifiers.add(classifier) if not verify_classifiers: import re # combined text-numeric sort that ensures that Python versions sort correctly split_re = re.compile(r"(\D*)(\d*)") sorted_classifiers = sorted( classifiers, key=lambda value: ([(a, int(b) if b else None) for a, b in split_re.findall(value)]), ) self._classifiers = sorted( unique_classifiers, key=lambda c: -1 if self.__classifier_is_private(c) else sorted_classifiers.index(c) ) return self._classifiers @property def urls(self) -> dict[str, str]: """ https://peps.python.org/pep-0621/#urls """ if self._urls is None: if "urls" in self.config: urls = self.config["urls"] if "urls" in 
self.dynamic: message = ( "Metadata field `urls` cannot be both statically defined and listed in field `project.dynamic`" ) raise ValueError(message) else: urls = {} if not isinstance(urls, dict): message = "Field `project.urls` must be a table" raise TypeError(message) sorted_urls = {} for label, url in urls.items(): if not isinstance(url, str): message = f"URL `{label}` of field `project.urls` must be a string" raise TypeError(message) sorted_urls[label] = url self._urls = sorted_urls return self._urls @property def scripts(self) -> dict[str, str]: """ https://peps.python.org/pep-0621/#entry-points """ if self._scripts is None: if "scripts" in self.config: scripts = self.config["scripts"] if "scripts" in self.dynamic: message = ( "Metadata field `scripts` cannot be both statically defined and " "listed in field `project.dynamic`" ) raise ValueError(message) else: scripts = {} if not isinstance(scripts, dict): message = "Field `project.scripts` must be a table" raise TypeError(message) sorted_scripts = {} for name, object_ref in sorted(scripts.items()): if not isinstance(object_ref, str): message = f"Object reference `{name}` of field `project.scripts` must be a string" raise TypeError(message) sorted_scripts[name] = object_ref self._scripts = sorted_scripts return self._scripts @property def gui_scripts(self) -> dict[str, str]: """ https://peps.python.org/pep-0621/#entry-points """ if self._gui_scripts is None: if "gui-scripts" in self.config: gui_scripts = self.config["gui-scripts"] if "gui-scripts" in self.dynamic: message = ( "Metadata field `gui-scripts` cannot be both statically defined and " "listed in field `project.dynamic`" ) raise ValueError(message) else: gui_scripts = {} if not isinstance(gui_scripts, dict): message = "Field `project.gui-scripts` must be a table" raise TypeError(message) sorted_gui_scripts = {} for name, object_ref in sorted(gui_scripts.items()): if not isinstance(object_ref, str): message = f"Object reference `{name}` of field 
`project.gui-scripts` must be a string" raise TypeError(message) sorted_gui_scripts[name] = object_ref self._gui_scripts = sorted_gui_scripts return self._gui_scripts @property def entry_points(self) -> dict[str, dict[str, str]]: """ https://peps.python.org/pep-0621/#entry-points """ if self._entry_points is None: if "entry-points" in self.config: defined_entry_point_groups = self.config["entry-points"] if "entry-points" in self.dynamic: message = ( "Metadata field `entry-points` cannot be both statically defined and " "listed in field `project.dynamic`" ) raise ValueError(message) else: defined_entry_point_groups = {} if not isinstance(defined_entry_point_groups, dict): message = "Field `project.entry-points` must be a table" raise TypeError(message) for forbidden_field, expected_field in (("console_scripts", "scripts"), ("gui-scripts", "gui-scripts")): if forbidden_field in defined_entry_point_groups: message = ( f"Field `{forbidden_field}` must be defined as `project.{expected_field}` " f"instead of in the `project.entry-points` table" ) raise ValueError(message) entry_point_groups = {} for group, entry_point_data in sorted(defined_entry_point_groups.items()): if not isinstance(entry_point_data, dict): message = f"Field `project.entry-points.{group}` must be a table" raise TypeError(message) entry_points = {} for name, object_ref in sorted(entry_point_data.items()): if not isinstance(object_ref, str): message = f"Object reference `{name}` of field `project.entry-points.{group}` must be a string" raise TypeError(message) entry_points[name] = object_ref if entry_points: entry_point_groups[group] = entry_points self._entry_points = entry_point_groups return self._entry_points @property def dependencies_complex(self) -> dict[str, Requirement]: """ https://peps.python.org/pep-0621/#dependencies-optional-dependencies """ if self._dependencies_complex is None: from packaging.requirements import InvalidRequirement, Requirement if "dependencies" in self.config: 
dependencies = self.config["dependencies"] if "dependencies" in self.dynamic: message = ( "Metadata field `dependencies` cannot be both statically defined and " "listed in field `project.dynamic`" ) raise ValueError(message) else: dependencies = [] if not isinstance(dependencies, list): message = "Field `project.dependencies` must be an array" raise TypeError(message) dependencies_complex = {} for i, entry in enumerate(dependencies, 1): if not isinstance(entry, str): message = f"Dependency #{i} of field `project.dependencies` must be a string" raise TypeError(message) try: requirement = Requirement(self.context.format(entry)) except InvalidRequirement as e: message = f"Dependency #{i} of field `project.dependencies` is invalid: {e}" raise ValueError(message) from None else: if requirement.url and not self.hatch_metadata.allow_direct_references: message = ( f"Dependency #{i} of field `project.dependencies` cannot be a direct reference unless " f"field `tool.hatch.metadata.allow-direct-references` is set to `true`" ) raise ValueError(message) normalize_requirement(requirement) dependencies_complex[format_dependency(requirement)] = requirement self._dependencies_complex = dict(sorted(dependencies_complex.items())) return self._dependencies_complex @property def dependencies(self) -> list[str]: """ https://peps.python.org/pep-0621/#dependencies-optional-dependencies """ if self._dependencies is None: self._dependencies = list(self.dependencies_complex) return self._dependencies @property def optional_dependencies_complex(self) -> dict[str, dict[str, Requirement]]: """ https://peps.python.org/pep-0621/#dependencies-optional-dependencies """ if self._optional_dependencies_complex is None: from packaging.requirements import InvalidRequirement, Requirement if "optional-dependencies" in self.config: optional_dependencies = self.config["optional-dependencies"] if "optional-dependencies" in self.dynamic: message = ( "Metadata field `optional-dependencies` cannot be both 
statically defined and " "listed in field `project.dynamic`" ) raise ValueError(message) else: optional_dependencies = {} if not isinstance(optional_dependencies, dict): message = "Field `project.optional-dependencies` must be a table" raise TypeError(message) normalized_options: dict[str, str] = {} optional_dependency_entries = {} inherited_options: dict[str, set[str]] = {} for option, dependencies in optional_dependencies.items(): if not is_valid_project_name(option): message = ( f"Optional dependency group `{option}` of field `project.optional-dependencies` must only " f"contain ASCII letters/digits, underscores, hyphens, and periods, and must begin and end with " f"ASCII letters/digits." ) raise ValueError(message) normalized_option = ( option if self.hatch_metadata.allow_ambiguous_features else normalize_project_name(option) ) if normalized_option in normalized_options: message = ( f"Optional dependency groups `{normalized_options[normalized_option]}` and `{option}` of " f"field `project.optional-dependencies` both evaluate to `{normalized_option}`." 
) raise ValueError(message) if not isinstance(dependencies, list): message = ( f"Dependencies for option `{option}` of field `project.optional-dependencies` must be an array" ) raise TypeError(message) entries = {} for i, entry in enumerate(dependencies, 1): if not isinstance(entry, str): message = ( f"Dependency #{i} of option `{option}` of field `project.optional-dependencies` " f"must be a string" ) raise TypeError(message) try: requirement = Requirement(self.context.format(entry)) except InvalidRequirement as e: message = ( f"Dependency #{i} of option `{option}` of field `project.optional-dependencies` " f"is invalid: {e}" ) raise ValueError(message) from None else: if requirement.url and not self.hatch_metadata.allow_direct_references: message = ( f"Dependency #{i} of option `{option}` of field `project.optional-dependencies` " f"cannot be a direct reference unless field " f"`tool.hatch.metadata.allow-direct-references` is set to `true`" ) raise ValueError(message) normalize_requirement(requirement) if requirement.name == self.name: if normalized_option in inherited_options: inherited_options[normalized_option].update(requirement.extras) else: inherited_options[normalized_option] = set(requirement.extras) else: entries[format_dependency(requirement)] = requirement normalized_options[normalized_option] = option optional_dependency_entries[normalized_option] = entries visited: set[str] = set() resolved: set[str] = set() for dependent_option in inherited_options: _resolve_optional_dependencies( optional_dependency_entries, dependent_option, inherited_options, visited, resolved ) self._optional_dependencies_complex = { option: dict(sorted(entries.items())) for option, entries in sorted(optional_dependency_entries.items()) } return self._optional_dependencies_complex @property def optional_dependencies(self) -> dict[str, list[str]]: """ https://peps.python.org/pep-0621/#dependencies-optional-dependencies """ if self._optional_dependencies is None: 
self._optional_dependencies = { option: list(entries) for option, entries in self.optional_dependencies_complex.items() } return self._optional_dependencies @property def dynamic(self) -> list[str]: """ https://peps.python.org/pep-0621/#dynamic """ if self._dynamic is None: dynamic = self.config.get("dynamic", []) if not isinstance(dynamic, list): message = "Field `project.dynamic` must be an array" raise TypeError(message) if not all(isinstance(entry, str) for entry in dynamic): message = "Field `project.dynamic` must only contain strings" raise TypeError(message) self._dynamic = sorted(dynamic) return self._dynamic def add_known_classifiers(self, classifiers: list[str]) -> None: self._extra_classifiers.update(classifiers) def validate_fields(self) -> None: # Trigger validation for everything for attribute in dir(self): getattr(self, attribute) @staticmethod def __classifier_is_private(classifier: str) -> bool: return classifier.lower().startswith("private ::")
CoreMetadata
python
google__pytype
pytype/pytd/parse/node_test.py
{ "start": 446, "end": 567 }
class ____(Node): """'Data' node. Visitor tests use this to store numbers in leafs.""" d1: Any d2: Any d3: Any
Data
python
openai__openai-python
tests/lib/test_pydantic.py
{ "start": 7726, "end": 7801 }
class ____(Enum): RED = "red" BLUE = "blue" GREEN = "green"
Color
python
kamyu104__LeetCode-Solutions
Python/longest-common-subsequence-between-sorted-arrays.py
{ "start": 71, "end": 839 }
class ____(object): def longestCommomSubsequence(self, arrays): """ :type arrays: List[List[int]] :rtype: List[int] """ result = min(arrays, key=lambda x: len(x)) for arr in arrays: new_result = [] i, j = 0, 0 while i != len(result) and j != len(arr): if result[i] < arr[j]: i += 1 elif result[i] > arr[j]: j += 1 else: new_result.append(result[i]) i += 1 j += 1 result = new_result return result # Time: O(m * n) # Space: O(k), k is min(m * n, max(x for arr in arrays for x in arr)) import collections
Solution
python
scipy__scipy
scipy/cluster/tests/test_hierarchy.py
{ "start": 9694, "end": 11152 }
class ____: def test_mlab_linkage_conversion_empty(self, xp): # Tests from/to_mlab_linkage on empty linkage array. X = xp.asarray([], dtype=xp.float64) xp_assert_equal(from_mlab_linkage(X), X) xp_assert_equal(to_mlab_linkage(X), X) def test_mlab_linkage_conversion_single_row(self, xp): # Tests from/to_mlab_linkage on linkage array with single row. Z = xp.asarray([[0., 1., 3., 2.]]) Zm = xp.asarray([[1, 2, 3]]) xp_assert_close(from_mlab_linkage(Zm), xp.asarray(Z, dtype=xp.float64), rtol=1e-15) xp_assert_close(to_mlab_linkage(Z), xp.asarray(Zm, dtype=xp.float64), rtol=1e-15) def test_mlab_linkage_conversion_multiple_rows(self, xp): # Tests from/to_mlab_linkage on linkage array with multiple rows. Zm = xp.asarray([[3, 6, 138], [4, 5, 219], [1, 8, 255], [2, 9, 268], [7, 10, 295]]) Z = xp.asarray([[2., 5., 138., 2.], [3., 4., 219., 2.], [0., 7., 255., 3.], [1., 8., 268., 4.], [6., 9., 295., 6.]], dtype=xp.float64) xp_assert_close(from_mlab_linkage(Zm), Z, rtol=1e-15) xp_assert_close(to_mlab_linkage(Z), xp.asarray(Zm, dtype=xp.float64), rtol=1e-15) @make_xp_test_case(fclusterdata)
TestMLabLinkageConversion
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/staticExpression1.py
{ "start": 617, "end": 900 }
class ____: DEFINED_FALSE: bool DEFINED_TRUE: bool DEFINED_STR: str dummy = Dummy() if dummy.DEFINED_TRUE: x = 1 else: x = "error!" if not dummy.DEFINED_FALSE: x = 1 else: x = "error!" if dummy.DEFINED_STR == "hi!": x = 1 else: x = "error!"
Dummy
python
jpadilla__pyjwt
jwt/exceptions.py
{ "start": 1299, "end": 1432 }
class ____(InvalidTokenError): """Raised when the specified algorithm is not recognized by PyJWT""" pass
InvalidAlgorithmError
python
kamyu104__LeetCode-Solutions
Python/count-the-number-of-consistent-strings.py
{ "start": 29, "end": 516 }
class ____(object): def countConsistentStrings(self, allowed, words): """ :type allowed: str :type words: List[str] :rtype: int """ lookup = [False]*26 for c in allowed: lookup[ord(c)-ord('a')] = True result = len(words) for word in words: for c in word: if not lookup[ord(c)-ord('a')]: result -= 1 break return result
Solution
python
agronholm__apscheduler
src/apscheduler/datastores/memory.py
{ "start": 872, "end": 14288 }
class ____(BaseDataStore): """ Stores scheduler data in memory, without serializing it. Can be shared between multiple schedulers within the same event loop. """ _tasks: dict[str, Task] = attrs.Factory(dict) _schedules: list[Schedule] = attrs.Factory(list) _schedules_by_id: dict[str, Schedule] = attrs.Factory(dict) _schedules_by_task_id: dict[str, set[Schedule]] = attrs.Factory( partial(defaultdict, set) ) _jobs_by_id: dict[UUID, Job] = attrs.Factory(dict) _jobs_by_task_id: dict[str, set[Job]] = attrs.Factory(partial(defaultdict, set)) _jobs_by_schedule_id: dict[str, set[Job]] = attrs.Factory(partial(defaultdict, set)) _job_results: dict[UUID, JobResult] = attrs.Factory(dict) def __repr__(self) -> str: return create_repr(self) def _find_schedule_index(self, schedule: Schedule) -> int: left_index = bisect_left(self._schedules, schedule) right_index = bisect_right(self._schedules, schedule) return self._schedules.index(schedule, left_index, right_index + 1) async def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]: if ids is None: return self._schedules.copy() return [ schedule for schedule in self._schedules if ids is None or schedule.id in ids ] async def add_task(self, task: Task) -> None: if task.id in self._tasks: task.running_jobs = self._tasks[task.id].running_jobs self._tasks[task.id] = task await self._event_broker.publish(TaskUpdated(task_id=task.id)) else: self._tasks[task.id] = task await self._event_broker.publish(TaskAdded(task_id=task.id)) async def remove_task(self, task_id: str) -> None: try: del self._tasks[task_id] except KeyError: raise TaskLookupError(task_id) from None await self._event_broker.publish(TaskRemoved(task_id=task_id)) async def get_task(self, task_id: str) -> Task: try: return self._tasks[task_id] except KeyError: raise TaskLookupError(task_id) from None async def get_tasks(self) -> list[Task]: return sorted(self._tasks.values()) async def add_schedule( self, schedule: Schedule, conflict_policy: ConflictPolicy ) 
-> None: old_schedule = self._schedules_by_id.get(schedule.id) if old_schedule is not None: if conflict_policy is ConflictPolicy.do_nothing: return elif conflict_policy is ConflictPolicy.exception: raise ConflictingIdError(schedule.id) index = self._find_schedule_index(old_schedule) del self._schedules[index] self._schedules_by_task_id[old_schedule.task_id].remove(old_schedule) self._schedules_by_id[schedule.id] = schedule self._schedules_by_task_id[schedule.task_id].add(schedule) insort_right(self._schedules, schedule) event: ScheduleUpdated | ScheduleAdded if old_schedule is not None: event = ScheduleUpdated( schedule_id=schedule.id, task_id=schedule.task_id, next_fire_time=schedule.next_fire_time, ) else: event = ScheduleAdded( schedule_id=schedule.id, task_id=schedule.task_id, next_fire_time=schedule.next_fire_time, ) await self._event_broker.publish(event) async def remove_schedules( self, ids: Iterable[str], *, finished: bool = False ) -> None: for schedule_id in ids: schedule = self._schedules_by_id.pop(schedule_id, None) if schedule: self._schedules.remove(schedule) event = ScheduleRemoved( schedule_id=schedule.id, task_id=schedule.task_id, finished=finished, ) await self._event_broker.publish(event) async def acquire_schedules( self, scheduler_id: str, lease_duration: timedelta, limit: int ) -> list[Schedule]: now = datetime.now(timezone.utc) acquired_until = now + lease_duration schedules: list[Schedule] = [] for schedule in self._schedules: if schedule.next_fire_time is None or schedule.next_fire_time > now: # The schedule is either exhausted or not yet due. There will be no # schedules that are due after this one, so we can stop here. 
break elif schedule.paused: # The schedule is paused continue elif schedule.acquired_until is not None: if ( schedule.acquired_by != scheduler_id and now <= schedule.acquired_until ): # The schedule has been acquired by another scheduler and the # timeout has not expired yet continue schedules.append(schedule) schedule.acquired_by = scheduler_id schedule.acquired_until = acquired_until if len(schedules) == limit: break return schedules async def release_schedules( self, scheduler_id: str, results: Sequence[ScheduleResult] ) -> None: # Send update events for schedules for result in results: # Remove the schedule schedule = self._schedules_by_id[result.schedule_id] index = self._find_schedule_index(schedule) del self._schedules[index] # Re-add the schedule to its new position schedule.last_fire_time = result.last_fire_time schedule.next_fire_time = result.next_fire_time schedule.acquired_by = None schedule.acquired_until = None insort_right(self._schedules, schedule) event = ScheduleUpdated( schedule_id=result.schedule_id, task_id=schedule.task_id, next_fire_time=result.next_fire_time, ) await self._event_broker.publish(event) async def get_next_schedule_run_time(self) -> datetime | None: return self._schedules[0].next_fire_time if self._schedules else None async def add_job(self, job: Job) -> None: self._jobs_by_id[job.id] = job self._jobs_by_task_id[job.task_id].add(job) if job.schedule_id is not None: self._jobs_by_schedule_id[job.schedule_id].add(job) event = JobAdded( job_id=job.id, task_id=job.task_id, schedule_id=job.schedule_id, ) await self._event_broker.publish(event) async def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]: if ids is not None: ids = frozenset(ids) if ids is None: return list(self._jobs_by_id.values()) return [ job for job in self._jobs_by_id.values() if ids is None or job.id in ids ] async def acquire_jobs( self, scheduler_id: str, lease_duration: timedelta, limit: int | None = None ) -> list[Job]: now = 
datetime.now(timezone.utc) acquired_until = now + lease_duration jobs: list[Job] = [] job_results: dict[Job, JobResult] = {} for job in self._jobs_by_id.values(): task = self._tasks[job.task_id] # Skip already acquired jobs (unless the acquisition lock has expired) if job.acquired_until is not None: if job.acquired_until >= now: continue else: task.running_jobs -= 1 # Discard the job if its start deadline has passed if job.start_deadline and job.start_deadline < now: job_results[job] = JobResult( job_id=job.id, outcome=JobOutcome.missed_start_deadline, finished_at=now, expires_at=now + job.result_expiration_time, ) continue # Skip the job if no more slots are available if ( task.max_running_jobs is not None and task.running_jobs >= task.max_running_jobs ): self._logger.debug( "Skipping job %s because task %r has the maximum number of %d jobs " "already running", job.id, job.task_id, task.running_jobs, ) continue # Mark the job as acquired by this worker jobs.append(job) job.acquired_by = scheduler_id job.acquired_until = acquired_until # Increment the number of running jobs for this task task.running_jobs += 1 # Exit the loop if enough jobs have been acquired if len(jobs) == limit: break # Publish the appropriate events for job in jobs: await self._event_broker.publish( JobAcquired.from_job(job, scheduler_id=scheduler_id) ) # Discard the jobs that could not start for job, result in job_results.items(): await self.release_job(scheduler_id, job, result) return jobs async def release_job(self, scheduler_id: str, job: Job, result: JobResult) -> None: # Record the job result if result.expires_at > result.finished_at: self._job_results[result.job_id] = result # Decrement the number of running jobs for this task if job.acquired_by: self._tasks[job.task_id].running_jobs -= 1 # Delete the job job = self._jobs_by_id.pop(result.job_id) # Remove the job from the jobs belonging to its task task_jobs = self._jobs_by_task_id[job.task_id] task_jobs.remove(job) if not task_jobs: 
del self._jobs_by_task_id[job.task_id] # If this was a scheduled job, remove the job from the set of jobs belonging to # this schedule if job.schedule_id: schedule_jobs = self._jobs_by_schedule_id[job.schedule_id] schedule_jobs.remove(job) if not schedule_jobs: del self._jobs_by_schedule_id[job.schedule_id] # Notify other schedulers await self._event_broker.publish( JobReleased.from_result( result, scheduler_id, job.task_id, job.schedule_id, job.scheduled_fire_time, ) ) async def get_job_result(self, job_id: UUID) -> JobResult | None: return self._job_results.pop(job_id, None) async def extend_acquired_schedule_leases( self, scheduler_id: str, schedule_ids: set[str], duration: timedelta ) -> None: acquired_until = datetime.now(timezone.utc) + duration for schedule in self._schedules: if schedule.acquired_by == scheduler_id and schedule.id in schedule_ids: schedule.acquired_until = acquired_until async def extend_acquired_job_leases( self, scheduler_id: str, job_ids: set[UUID], duration: timedelta ) -> None: acquired_until = datetime.now(timezone.utc) + duration for job in self._jobs_by_id.values(): if job.acquired_by == scheduler_id and job.id in job_ids: job.acquired_until = acquired_until async def reap_abandoned_jobs(self, scheduler_id: str) -> None: now = datetime.now(timezone.utc) for job in list(self._jobs_by_id.values()): if job.acquired_by == scheduler_id: result = JobResult.from_job( job=job, outcome=JobOutcome.abandoned, finished_at=now ) await self.release_job(job.acquired_by, job, result) async def cleanup(self) -> None: # Clean up expired job results now = datetime.now(timezone.utc) expired_job_ids = [ result.job_id for result in self._job_results.values() if result.expires_at <= now ] for job_id in expired_job_ids: del self._job_results[job_id] # Finish any jobs whose leases have expired expired_jobs = [ job for job in self._jobs_by_id.values() if job.acquired_until is not None and job.acquired_until < now ] for job in expired_jobs: result = 
JobResult.from_job( job=job, outcome=JobOutcome.abandoned, finished_at=now ) assert job.acquired_by is not None await self.release_job(job.acquired_by, job, result) # Clean up finished schedules that have no running jobs finished_schedule_ids = [ schedule_id for schedule_id, schedule in self._schedules_by_id.items() if schedule.next_fire_time is None and schedule_id not in self._jobs_by_schedule_id ] await self.remove_schedules(finished_schedule_ids, finished=True)
MemoryDataStore
python
run-llama__llama_index
llama-index-integrations/indices/llama-index-indices-managed-vectara/llama_index/indices/managed/vectara/query.py
{ "start": 5587, "end": 9306 }
class ____(BaseChatEngine): def __init__( self, retriever: VectaraRetriever, streaming: bool = False, summary_response_lang: str = "eng", summary_num_results: int = 5, summary_prompt_name: str = "vectara-summary-ext-24-05-med-omni", node_postprocessors: Optional[List[BaseNodePostprocessor]] = None, callback_manager: Optional[CallbackManager] = None, verbose: bool = False, **kwargs: Any, ) -> None: self._retriever = retriever self._streaming = streaming self._summary_enabled = True self._summary_response_lang = summary_response_lang self._summary_num_results = summary_num_results self._summary_prompt_name = summary_prompt_name self._node_postprocessors = node_postprocessors or [] self._verbose = verbose self.callback_manager = callback_manager or CallbackManager([]) for node_postprocessor in self._node_postprocessors: node_postprocessor.callback_manager = self.callback_manager self.conv_id = None @classmethod def from_args( cls, retriever: VectaraRetriever, streaming: bool = False, node_postprocessors: Optional[List[BaseNodePostprocessor]] = None, **kwargs: Any, ) -> "VectaraChatEngine": """Initialize a ContextChatEngine from default parameters.""" node_postprocessors = node_postprocessors or [] return cls( retriever, streaming, node_postprocessors=node_postprocessors, callback_manager=Settings.callback_manager, **kwargs, ) def chat(self, message: str) -> AgentChatResponse: """Chat with the agent.""" with self.callback_manager.event( CBEventType.QUERY, payload={EventPayload.QUERY_STR: message} ) as query_event: kwargs = ( { "response_language": self._summary_response_lang, "max_used_search_results": self._summary_num_results, "generation_preset_name": self._summary_prompt_name, } if self._summary_enabled else {} ) nodes, summary, self.conv_id = self._retriever._vectara_query( QueryBundle(message), chat=True, conv_id=self.conv_id, verbose=self._verbose, **kwargs, ) query_event.on_end(payload={EventPayload.RESPONSE: summary["text"]}) return AgentChatResponse( 
response=summary["text"], source_nodes=nodes, metadata={"fcs": summary.get("fcs", None)}, ) async def achat(self, message: str) -> AgentChatResponse: """Chat with the agent asynchronously.""" return await self.chat(message) def set_chat_id(self, source_nodes: List, metadata: Dict) -> None: """Callback function for setting the conv_id.""" self.conv_id = metadata.get("chat_id", self.conv_id) def stream_chat(self, message: str) -> StreamingAgentChatResponse: query_bundle = QueryBundle(message) return self._retriever._vectara_stream( query_bundle, chat=True, conv_id=self.conv_id, callback_func=self.set_chat_id, ) async def astream_chat(self, message: str) -> StreamingAgentChatResponse: return await self.stream_chat(message) def reset(self) -> None: self.conv_id = None def chat_history(self) -> List[str]: return ["Not implemented Yet."]
VectaraChatEngine
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_pie01.py
{ "start": 315, "end": 1195 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_pie01.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "pie"}) data = [ [2, 4, 6], [60, 30, 10], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) chart.add_series( { "categories": "=Sheet1!$A$1:$A$3", "values": "=Sheet1!$B$1:$B$3", } ) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
run-llama__llama_index
llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_shared.py
{ "start": 4431, "end": 6612 }
class ____(str, Enum): """Enumeration for operator classes used in vector indexes.""" # Full-precision dense vector operator classes vector_cosine_ops = "vector_cosine_ops" vector_ip_ops = "vector_ip_ops" vector_l1_ops = "vector_l1_ops" vector_l2_ops = "vector_l2_ops" # Half-precision dense vector operator classes halfvec_cosine_ops = "halfvec_cosine_ops" halfvec_ip_ops = "halfvec_ip_ops" halfvec_l1_ops = "halfvec_l1_ops" halfvec_l2_ops = "halfvec_l2_ops" # Sparse vector operator classes sparsevec_cosine_ops = "sparsevec_cosine_ops" sparsevec_ip_ops = "sparsevec_ip_ops" sparsevec_l1_ops = "sparsevec_l1_ops" sparsevec_l2_ops = "sparsevec_l2_ops" # Bit vector operator classes bit_hamming_ops = "bit_hamming_ops" bit_jaccard_ops = "bit_jaccard_ops" def to_operator(self) -> str: """Return the distance operator as a string. :return: The distance operator string. :rtype: str :raises ValueError: If the vector operator class is unsupported. """ match self: case ( VectorOpClass.vector_cosine_ops | VectorOpClass.halfvec_cosine_ops | VectorOpClass.sparsevec_cosine_ops ): return "<=>" case ( VectorOpClass.vector_ip_ops | VectorOpClass.halfvec_ip_ops | VectorOpClass.sparsevec_ip_ops ): return "<#>" case ( VectorOpClass.vector_l1_ops | VectorOpClass.halfvec_l1_ops | VectorOpClass.sparsevec_l1_ops ): return "<+>" case ( VectorOpClass.vector_l2_ops | VectorOpClass.halfvec_l2_ops | VectorOpClass.sparsevec_l2_ops ): return "<->" case VectorOpClass.bit_hamming_ops: return "<~>" case VectorOpClass.bit_jaccard_ops: return "<%>" case _: raise ValueError(f"Unsupported vector operator class: {self}")
VectorOpClass
python
django__django
tests/file_uploads/tests.py
{ "start": 31519, "end": 33109 }
class ____(SimpleTestCase): """ Tests for error handling during directory creation via _save_FIELD_file (ticket #6450) """ @classmethod def setUpClass(cls): super().setUpClass() os.makedirs(MEDIA_ROOT, exist_ok=True) cls.addClassCleanup(shutil.rmtree, MEDIA_ROOT) def setUp(self): self.obj = FileModel() @unittest.skipIf( sys.platform == "win32", "Python on Windows doesn't have working os.chmod()." ) @override_settings( STORAGES={ DEFAULT_STORAGE_ALIAS: { "BACKEND": "django.core.files.storage.FileSystemStorage", } } ) def test_readonly_root(self): """Permission errors are not swallowed""" os.chmod(MEDIA_ROOT, 0o500) self.addCleanup(os.chmod, MEDIA_ROOT, 0o700) with self.assertRaises(PermissionError): self.obj.testfile.save( "foo.txt", SimpleUploadedFile("foo.txt", b"x"), save=False ) def test_not_a_directory(self): default_storage.delete(UPLOAD_TO) # Create a file with the upload directory name with SimpleUploadedFile(UPLOAD_TO, b"x") as file: default_storage.save(UPLOAD_FOLDER, file) self.addCleanup(default_storage.delete, UPLOAD_TO) msg = "%s exists and is not a directory." % UPLOAD_TO with self.assertRaisesMessage(FileExistsError, msg): with SimpleUploadedFile("foo.txt", b"x") as file: self.obj.testfile.save("foo.txt", file, save=False)
DirectoryCreationTests
python
python__mypy
mypyc/ir/ops.py
{ "start": 10669, "end": 11640 }
class ____(BaseAssign): """Assign multiple values to a Register (dest = src1, src2, ...). This is used to initialize RArray values. It's provided to avoid very verbose IR for common vectorcall operations. Note that this interacts atypically with reference counting. We assume that each RArray register is initialized exactly once with this op. """ error_kind = ERR_NEVER def __init__(self, dest: Register, src: list[Value], line: int = -1) -> None: super().__init__(dest, line) assert src assert isinstance(dest.type, RArray) assert dest.type.length == len(src) self.src = src def sources(self) -> list[Value]: return self.src.copy() def set_sources(self, new: list[Value]) -> None: self.src = new[:] def stolen(self) -> list[Value]: return [] def accept(self, visitor: OpVisitor[T]) -> T: return visitor.visit_assign_multi(self)
AssignMulti
python
getsentry__sentry
src/sentry/plugins/bases/issue.py
{ "start": 942, "end": 9708 }
class ____(Plugin): # project_conf_form = BaseIssueOptionsForm new_issue_form: type[forms.Form] = NewIssueForm create_issue_template = "sentry/plugins/bases/issue/create_issue.html" not_configured_template = "sentry/plugins/bases/issue/not_configured.html" needs_auth_template = "sentry/plugins/bases/issue/needs_auth.html" auth_provider: str | None = None def get_plugin_type(self) -> str: return "issue-tracking" def _get_group_body(self, group, event, **kwargs): result = [] for interface in event.interfaces.values(): output = safe_execute(interface.to_string, event) if output: result.append(output) return "\n\n".join(result) def _get_group_description(self, group, event): referrer = self.get_conf_key() + "_plugin" output = [absolute_uri(group.get_absolute_url(params={"referrer": referrer}))] body = self._get_group_body(group, event) if body: output.extend(["", "```", body, "```"]) return "\n".join(output) def _get_group_title(self, group, event): return event.title def is_configured(self, project) -> bool: raise NotImplementedError def get_auth_for_user(self, user, **kwargs) -> RpcUserSocialAuth | None: """ Return a ``RpcUserSocialAuth`` object for the given user based on this plugins ``auth_provider``. """ assert self.auth_provider, "There is no auth provider configured for this plugin." if not user.is_authenticated: return None auth = usersocialauth_service.get_one_or_none( filter={"user_id": user.id, "provider": self.auth_provider} ) return auth def needs_auth(self, request: Request, project, **kwargs): """ Return ``True`` if the authenticated user needs to associate an auth service before performing actions with this plugin. """ if self.auth_provider is None: return False if not request.user.is_authenticated: return True auth = usersocialauth_service.get_one_or_none( filter={"user_id": request.user.id, "provider": self.auth_provider} ) return bool(auth) def get_new_issue_title(self, **kwargs): """ Return a string for the "Create new issue" action label. 
""" return "Create %s Issue" % self.get_title() def get_new_issue_form(self, request: Request, group, event, **kwargs): """ Return a Form for the "Create new issue" page. """ return self.new_issue_form( request.POST or None, initial=self.get_initial_form_data(request, group, event) ) def get_issue_url(self, group, issue_id: str) -> str: """ Given an issue_id (string) return an absolute URL to the issue's details page. """ raise NotImplementedError def get_issue_label(self, group, issue_id) -> str: """ Given an issue_id (string) return a string representing the issue. e.g. GitHub represents issues as GH-XXX """ return "#%s" % issue_id def create_issue(self, request: Request, group, form_data): """ Creates the issue on the remote service and returns an issue ID. """ raise NotImplementedError def link_issue(self, request: Request, group, form_data, **kwargs): """ Can be overridden for any actions needed when linking issues (like adding a comment to an existing issue). """ def get_initial_form_data(self, request: Request, group, event, **kwargs): return { "description": self._get_group_description(group, event), "title": self._get_group_title(group, event), } def has_auth_configured(self, **kwargs): if not self.auth_provider: return True return self.auth_provider in get_auth_providers() def view(self, request: Request, group, **kwargs): has_auth_configured = self.has_auth_configured() if not (has_auth_configured and self.is_configured(project=group.project)): if self.auth_provider: required_auth_settings = settings.AUTH_PROVIDERS[self.auth_provider] else: required_auth_settings = None project = group.project return self.render( self.not_configured_template, { "title": self.get_title(), "project": group.project, "has_auth_configured": has_auth_configured, "required_auth_settings": required_auth_settings, "plugin_link": f"/settings/{project.organization.slug}/projects/{project.slug}/plugins/{self.slug}/", }, ) if self.needs_auth(project=group.project, request=request): 
return self.render( self.needs_auth_template, {"title": self.get_title(), "project": group.project} ) if GroupMeta.objects.get_value(group, "%s:tid" % self.get_conf_key(), None): return None prefix = self.get_conf_key() event = group.get_latest_event() op = request.POST.get("op", "create") create_form = self.get_new_issue_form(request, group, event) if op == "create": if create_form.is_valid(): try: issue_id = self.create_issue( group=group, form_data=create_form.cleaned_data, request=request ) except forms.ValidationError as e: create_form.errors["__all__"] = ["Error creating issue: %s" % e] else: if create_form.is_valid(): GroupMeta.objects.set_value(group, "%s:tid" % prefix, issue_id) issue_information = { "title": create_form.cleaned_data["title"], "provider": self.get_title(), "location": self.get_issue_url(group, issue_id), "label": self.get_issue_label(group=group, issue_id=issue_id), } Activity.objects.create( project=group.project, group=group, type=ActivityType.CREATE_ISSUE.value, user_id=request.user.id, data=issue_information, ) try: analytics.record( IssueTrackerUsedEvent( user_id=request.user.id, default_user_id=project.organization.get_default_owner().id, organization_id=project.organization_id, project_id=project.id, issue_tracker=self.slug, ) ) except Exception as e: sentry_sdk.capture_exception(e) return self.redirect(group.get_absolute_url()) context = { "create_form": create_form, # pass in 'form' for legacy compat "form": create_form, "title": self.get_new_issue_title(), "can_link_existing_issues": self.can_link_existing_issues, "op": op, } return self.render(self.create_issue_template, context) def actions(self, group, action_list, **kwargs): if not self.is_configured(project=group.project): return action_list prefix = self.get_conf_key() if not GroupMeta.objects.get_value(group, "%s:tid" % prefix, None): action_list.append((self.get_new_issue_title(), self.get_url(group))) return action_list def tags(self, request: Request, group, tag_list, 
**kwargs): if not self.is_configured(project=group.project): return tag_list prefix = self.get_conf_key() issue_id = GroupMeta.objects.get_value(group, "%s:tid" % prefix) if not issue_id: return tag_list tag_list.append( { "url": self.get_issue_url(group=group, issue_id=issue_id), "displayName": self.get_issue_label(group=group, issue_id=issue_id), } ) return tag_list IssuePlugin = IssueTrackingPlugin
IssueTrackingPlugin
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/ruff/RUF056.py
{ "start": 1818, "end": 5736 }
class ____: def method(self): return self.data.get("key", {}) # Using dict.get in a nested function with falsy fallback def outer(): def inner(): return my_dict.get("key", "") return inner() # Using dict.get with variable fallback that is falsy falsy_value = None value = my_dict.get("key", falsy_value) # Using dict.get with variable fallback that is truthy truthy_value = "exists" value = my_dict.get("key", truthy_value) # Using dict.get with complex expressions as fallback value = my_dict.get("key", 0 or "default") value = my_dict.get("key", [] if condition else {}) # testing dict.get call using kwargs value = my_dict.get(key="key", default=False) value = my_dict.get(default=[], key="key") # Edge Cases dicts = [my_dict, my_dict, my_dict] # Falsy fallback in a lambda get_fallback = lambda d: d.get("key", False) # Falsy fallback in a list comprehension results = [d.get("key", "") for d in dicts] # Falsy fallback in a generator expression results = (d.get("key", None) for d in dicts) # Falsy fallback in a ternary expression value = my_dict.get("key", 0) if True else "default" # Falsy fallback with inline comment value = my_dict.get("key", # comment1 [] # comment2 ) # comment3 # Invalid # Invalid falsy fallbacks are when the call to dict.get is used in a boolean context # dict.get in ternary expression value = "not found" if my_dict.get("key", False) else "default" # [RUF056] # dict.get in an if statement if my_dict.get("key", False): # [RUF056] pass # dict.get in compound if statement if True and my_dict.get("key", False): # [RUF056] pass if my_dict.get("key", False) or True: # [RUF056] pass # dict.get in an assert statement assert my_dict.get("key", False) # [RUF056] # dict.get in a while statement while my_dict.get("key", False): # [RUF056] pass # dict.get in unary not expression value = not my_dict.get("key", False) # [RUF056] # testing all falsy fallbacks value = not my_dict.get("key", False) # [RUF056] value = not my_dict.get("key", []) # [RUF056] value = not 
my_dict.get("key", list()) # [RUF056] value = not my_dict.get("key", {}) # [RUF056] value = not my_dict.get("key", dict()) # [RUF056] value = not my_dict.get("key", set()) # [RUF056] value = not my_dict.get("key", None) # [RUF056] value = not my_dict.get("key", 0) # [RUF056] value = not my_dict.get("key", 0.0) # [RUF056] value = not my_dict.get("key", "") # [RUF056] # testing invalid dict.get call with inline comment value = not my_dict.get("key", # comment1 [] # comment2 ) # [RUF056] # regression tests for https://github.com/astral-sh/ruff/issues/18628 # we should avoid fixes when there are "unknown" arguments present, including # extra positional arguments, either of the positional-only arguments passed as # a keyword, or completely unknown keywords. # extra positional not my_dict.get("key", False, "?!") # `default` is positional-only, so these are invalid not my_dict.get("key", default=False) not my_dict.get(key="key", default=False) not my_dict.get(default=[], key="key") not my_dict.get(default=False) not my_dict.get(key="key", other="something", default=False) not my_dict.get(default=False, other="something", key="test") # comments don't really matter here because of the kwargs but include them for # completeness not my_dict.get( key="key", # comment1 default=False, # comment2 ) # comment 3 not my_dict.get( default=[], # comment1 key="key", # comment2 ) # comment 3 # the fix is arguably okay here because the same `takes no keyword arguments` # TypeError is raised at runtime before and after the fix, but we still bail # out for having an unrecognized number of arguments not my_dict.get("key", False, foo=...) # https://github.com/astral-sh/ruff/issues/18798 d = {} not d.get("key", (False))
MyClass
python
allegroai__clearml
clearml/backend_api/services/v2_13/projects.py
{ "start": 104079, "end": 105226 }
class ____(Response): """ Response of projects.make_private endpoint. :param updated: Number of projects updated :type updated: int """ _service = "projects" _action = "make_private" _version = "2.13" _schema = { "definitions": {}, "properties": { "updated": { "description": "Number of projects updated", "type": ["integer", "null"], } }, "type": "object", } def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None: super(MakePrivateResponse, self).__init__(**kwargs) self.updated = updated @schema_property("updated") def updated(self) -> Optional[int]: return self._property_updated @updated.setter def updated(self, value: Optional[int]) -> None: if value is None: self._property_updated = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "updated", six.integer_types) self._property_updated = value
MakePrivateResponse
python
great-expectations__great_expectations
great_expectations/render/renderer/content_block/profiling_column_properties_table_content_block.py
{ "start": 389, "end": 2572 }
class ____(ContentBlockRenderer): expectation_renderers = { "expect_column_values_to_not_match_regex": [ LegacyDescriptiveRendererType.COLUMN_PROPERTIES_TABLE_REGEX_COUNT_ROW ], "expect_column_unique_value_count_to_be_between": [ LegacyDescriptiveRendererType.COLUMN_PROPERTIES_TABLE_DISTINCT_COUNT_ROW ], "expect_column_proportion_of_unique_values_to_be_between": [ LegacyDescriptiveRendererType.COLUMN_PROPERTIES_TABLE_DISTINCT_PERCENT_ROW ], "expect_column_values_to_not_be_null": [ LegacyDescriptiveRendererType.COLUMN_PROPERTIES_TABLE_MISSING_COUNT_ROW, LegacyDescriptiveRendererType.COLUMN_PROPERTIES_TABLE_MISSING_PERCENT_ROW, ], } @classmethod @override def render(cls, ge_object, header_row=None): """Each expectation method should return a list of rows""" if header_row is None: header_row = [] table_rows = [] if isinstance(ge_object, list): for sub_object in ge_object: expectation_type = cls._get_expectation_type(sub_object) if expectation_type in cls.expectation_renderers: new_rows = [ get_renderer_impl(expectation_type, renderer_type)[1](result=sub_object) for renderer_type in cls.expectation_renderers.get(expectation_type) ] table_rows.extend(new_rows) else: expectation_type = cls._get_expectation_type(ge_object) if expectation_type in cls.expectation_renderers: new_rows = [ get_renderer_impl(expectation_type, renderer_type)[1](result=ge_object) for renderer_type in cls.expectation_renderers.get(expectation_type) ] table_rows.extend(new_rows) return RenderedTableContent( **{ "content_block_type": "table", "header_row": header_row, "table": table_rows, } )
ProfilingColumnPropertiesTableContentBlockRenderer
python
pytorch__pytorch
torch/_dynamo/source.py
{ "start": 19304, "end": 19723 }
class ____(ChainedSource): def __post_init__(self) -> None: assert self.base is not None def reconstruct(self, codegen: "PyCodegen") -> None: codegen(self.base) def guard_source(self) -> GuardSource: return self.base.guard_source() def name(self) -> str: return f"cast_symbool_to_symint_guardless({self.base.name()})" @dataclasses.dataclass(frozen=True)
ConvertIntSource
python
kubernetes-client__python
kubernetes/base/config/kube_config_test.py
{ "start": 63243, "end": 64322 }
class ____(BaseTestCase): # Verifies properties of kubernetes.client.Configuration. # These tests guard against changes to the upstream configuration class, # since GCP and Exec authorization use refresh_api_key_hook to refresh # their tokens regularly. def test_refresh_api_key_hook_exists(self): self.assertTrue(hasattr(Configuration(), 'refresh_api_key_hook')) def test_get_api_key_calls_refresh_api_key_hook(self): identifier = 'authorization' expected_token = 'expected_token' old_token = 'old_token' config = Configuration( api_key={identifier: old_token}, api_key_prefix={identifier: 'Bearer'} ) def refresh_api_key_hook(client_config): self.assertEqual(client_config, config) client_config.api_key[identifier] = expected_token config.refresh_api_key_hook = refresh_api_key_hook self.assertEqual('Bearer ' + expected_token, config.get_api_key_with_prefix(identifier))
TestKubernetesClientConfiguration
python
django__django
tests/postgres_tests/test_indexes.py
{ "start": 5938, "end": 6719 }
class ____(IndexTestMixin, PostgreSQLSimpleTestCase): index_class = GinIndex def test_suffix(self): self.assertEqual(GinIndex.suffix, "gin") def test_deconstruction(self): index = GinIndex( fields=["title"], name="test_title_gin", fastupdate=True, gin_pending_list_limit=128, ) path, args, kwargs = index.deconstruct() self.assertEqual(path, "django.contrib.postgres.indexes.GinIndex") self.assertEqual(args, ()) self.assertEqual( kwargs, { "fields": ["title"], "name": "test_title_gin", "fastupdate": True, "gin_pending_list_limit": 128, }, )
GinIndexTests
python
jupyterlab__jupyterlab
jupyterlab/semver.py
{ "start": 19578, "end": 20972 }
class ____: semver = None def __init__(self, comp, loose): logger.debug("comparator: %s %s", comp, loose) self.loose = loose self.parse(comp) if self.semver == ANY: self.value = "" else: self.value = self.operator + self.semver.version def parse(self, comp): r = regexp[COMPARATORLOOSE] if self.loose else regexp[COMPARATOR] logger.debug("parse comp=%s", comp) m = r.search(comp) if m is None: raise ValueError(f"Invalid comparator: {comp}") self.operator = m.group(1) # if it literally is just '>' or '' then allow anything. if m.group(2) is None: self.semver = ANY else: self.semver = semver(m.group(2), self.loose) def __repr__(self): return f'<SemVer Comparator "{self}">' def __str__(self): return self.value def test(self, version): logger.debug("Comparator, test %s, %s", version, self.loose) if self.semver == ANY: return True else: return cmp(version, self.operator, self.semver, self.loose) def make_range(range_, loose): if isinstance(range_, Range) and range_.loose == loose: return range_ # if (!(this instanceof Range)) # return new Range(range, loose); return Range(range_, loose)
Comparator
python
numba__numba
numba/tests/test_array_exprs.py
{ "start": 3063, "end": 14277 }
class ____(MemoryLeakMixin, TestCase): def _compile_function(self, fn, arg_tys): """ Compile the given function both without and with rewrites enabled. """ control_pipeline = RewritesTester.mk_no_rw_pipeline(arg_tys) cres_0 = control_pipeline.compile_extra(fn) control_cfunc = cres_0.entry_point test_pipeline = RewritesTester.mk_pipeline(arg_tys) cres_1 = test_pipeline.compile_extra(fn) test_cfunc = cres_1.entry_point return control_pipeline, control_cfunc, test_pipeline, test_cfunc def test_simple_expr(self): ''' Using a simple array expression, verify that rewriting is taking place, and is fusing loops. ''' A = np.linspace(0,1,10) X = np.linspace(2,1,10) Y = np.linspace(1,2,10) arg_tys = [typeof(arg) for arg in (A, X, Y)] control_pipeline, nb_axy_0, test_pipeline, nb_axy_1 = \ self._compile_function(axy, arg_tys) control_pipeline2 = RewritesTester.mk_no_rw_pipeline(arg_tys) cres_2 = control_pipeline2.compile_extra(ax2) nb_ctl = cres_2.entry_point expected = nb_axy_0(A, X, Y) actual = nb_axy_1(A, X, Y) control = nb_ctl(A, X, Y) np.testing.assert_array_equal(expected, actual) np.testing.assert_array_equal(control, actual) ir0 = control_pipeline.state.func_ir.blocks ir1 = test_pipeline.state.func_ir.blocks ir2 = control_pipeline2.state.func_ir.blocks self.assertEqual(len(ir0), len(ir1)) self.assertEqual(len(ir0), len(ir2)) # The rewritten IR should be smaller than the original. self.assertGreater(len(ir0[0].body), len(ir1[0].body)) self.assertEqual(len(ir0[0].body), len(ir2[0].body)) def _get_array_exprs(self, block): for instr in block: if isinstance(instr, ir.Assign): if isinstance(instr.value, ir.Expr): if instr.value.op == 'arrayexpr': yield instr def _array_expr_to_set(self, expr, out=None): ''' Convert an array expression tree into a set of operators. 
''' if out is None: out = set() if not isinstance(expr, tuple): raise ValueError("{0} not a tuple".format(expr)) operation, operands = expr processed_operands = [] for operand in operands: if isinstance(operand, tuple): operand, _ = self._array_expr_to_set(operand, out) processed_operands.append(operand) processed_expr = operation, tuple(processed_operands) out.add(processed_expr) return processed_expr, out def _test_root_function(self, fn=pos_root): A = np.random.random(10) B = np.random.random(10) + 1. # Increase likelihood of real # root (could add 2 to force all # roots to be real). C = np.random.random(10) arg_tys = [typeof(arg) for arg in (A, B, C)] control_pipeline = RewritesTester.mk_no_rw_pipeline(arg_tys) control_cres = control_pipeline.compile_extra(fn) nb_fn_0 = control_cres.entry_point test_pipeline = RewritesTester.mk_pipeline(arg_tys) test_cres = test_pipeline.compile_extra(fn) nb_fn_1 = test_cres.entry_point np_result = fn(A, B, C) nb_result_0 = nb_fn_0(A, B, C) nb_result_1 = nb_fn_1(A, B, C) np.testing.assert_array_almost_equal(np_result, nb_result_0) np.testing.assert_array_almost_equal(nb_result_0, nb_result_1) return Namespace(locals()) def _test_cube_function(self, fn=cube): A = np.arange(10, dtype=np.float64) arg_tys = (typeof(A),) control_pipeline = RewritesTester.mk_no_rw_pipeline(arg_tys) control_cres = control_pipeline.compile_extra(fn) nb_fn_0 = control_cres.entry_point test_pipeline = RewritesTester.mk_pipeline(arg_tys) test_cres = test_pipeline.compile_extra(fn) nb_fn_1 = test_cres.entry_point expected = A ** 3 self.assertPreciseEqual(expected, nb_fn_0(A)) self.assertPreciseEqual(expected, nb_fn_1(A)) return Namespace(locals()) def _test_explicit_output_function(self, fn): """ Test function having a (a, b, out) signature where *out* is an output array the function writes into. 
""" A = np.arange(10, dtype=np.float64) B = A + 1 arg_tys = (typeof(A),) * 3 control_pipeline, control_cfunc, test_pipeline, test_cfunc = \ self._compile_function(fn, arg_tys) def run_func(fn): out = np.zeros_like(A) fn(A, B, out) return out expected = run_func(fn) self.assertPreciseEqual(expected, run_func(control_cfunc)) self.assertPreciseEqual(expected, run_func(test_cfunc)) return Namespace(locals()) def _assert_array_exprs(self, block, expected_count): """ Assert the *block* has the expected number of array expressions in it. """ rewrite_count = len(list(self._get_array_exprs(block))) self.assertEqual(rewrite_count, expected_count) def _assert_total_rewrite(self, control_ir, test_ir, trivial=False): """ Given two dictionaries of Numba IR blocks, check to make sure the control IR has no array expressions, while the test IR contains one and only one. """ # Both IRs have the same number of blocks (presumably 1) self.assertEqual(len(control_ir), len(test_ir)) control_block = control_ir[0].body test_block = test_ir[0].body self._assert_array_exprs(control_block, 0) self._assert_array_exprs(test_block, 1) if not trivial: # If the expression wasn't trivial, the block length should # have decreased (since a sequence of exprs was replaced # with a single nested array expr). self.assertGreater(len(control_block), len(test_block)) def _assert_no_rewrite(self, control_ir, test_ir): """ Given two dictionaries of Numba IR blocks, check to make sure the control IR and the test IR both have no array expressions. 
""" self.assertEqual(len(control_ir), len(test_ir)) # All blocks should be identical, and not rewritten for k, v in control_ir.items(): control_block = v.body test_block = test_ir[k].body self.assertEqual(len(control_block), len(test_block)) self._assert_array_exprs(control_block, 0) self._assert_array_exprs(test_block, 0) def test_trivial_expr(self): """ Ensure even a non-nested expression is rewritten, as it can enable scalar optimizations such as rewriting `x ** 2`. """ ns = self._test_cube_function() self._assert_total_rewrite(ns.control_pipeline.state.func_ir.blocks, ns.test_pipeline.state.func_ir.blocks, trivial=True) def test_complicated_expr(self): ''' Using the polynomial root function, ensure the full expression is being put in the same kernel with no remnants of intermediate array expressions. ''' ns = self._test_root_function() self._assert_total_rewrite(ns.control_pipeline.state.func_ir.blocks, ns.test_pipeline.state.func_ir.blocks) def test_common_subexpressions(self, fn=neg_root_common_subexpr): ''' Attempt to verify that rewriting will incorporate user common subexpressions properly. ''' ns = self._test_root_function(fn) ir0 = ns.control_pipeline.state.func_ir.blocks ir1 = ns.test_pipeline.state.func_ir.blocks self.assertEqual(len(ir0), len(ir1)) self.assertGreater(len(ir0[0].body), len(ir1[0].body)) self.assertEqual(len(list(self._get_array_exprs(ir0[0].body))), 0) # Verify that we didn't rewrite everything into a monolithic # array expression since we stored temporary values in # variables that might be used later (from the optimization's # point of view). array_expr_instrs = list(self._get_array_exprs(ir1[0].body)) self.assertGreater(len(array_expr_instrs), 1) # Now check that we haven't duplicated any subexpressions in # the rewritten code. 
array_sets = list(self._array_expr_to_set(instr.value.expr)[1] for instr in array_expr_instrs) for expr_set_0, expr_set_1 in zip(array_sets[:-1], array_sets[1:]): intersections = expr_set_0.intersection(expr_set_1) if intersections: self.fail("Common subexpressions detected in array " "expressions ({0})".format(intersections)) def test_complex_subexpression(self): return self.test_common_subexpressions(neg_root_complex_subexpr) def test_ufunc_and_dufunc_calls(self): ''' Verify that ufunc and DUFunc calls are being properly included in array expressions. ''' A = np.random.random(10) B = np.random.random(10) arg_tys = [typeof(arg) for arg in (A, B)] vaxy_descr = vaxy._dispatcher.targetdescr control_pipeline = RewritesTester.mk_no_rw_pipeline( arg_tys, typing_context=vaxy_descr.typing_context, target_context=vaxy_descr.target_context) cres_0 = control_pipeline.compile_extra(call_stuff) nb_call_stuff_0 = cres_0.entry_point test_pipeline = RewritesTester.mk_pipeline( arg_tys, typing_context=vaxy_descr.typing_context, target_context=vaxy_descr.target_context) cres_1 = test_pipeline.compile_extra(call_stuff) nb_call_stuff_1 = cres_1.entry_point expected = call_stuff(A, B) control = nb_call_stuff_0(A, B) actual = nb_call_stuff_1(A, B) np.testing.assert_array_almost_equal(expected, control) np.testing.assert_array_almost_equal(expected, actual) self._assert_total_rewrite(control_pipeline.state.func_ir.blocks, test_pipeline.state.func_ir.blocks) def test_cmp_op(self): ''' Verify that comparison operators are supported by the rewriter. ''' ns = self._test_root_function(are_roots_imaginary) self._assert_total_rewrite(ns.control_pipeline.state.func_ir.blocks, ns.test_pipeline.state.func_ir.blocks) def test_explicit_output(self): """ Check that ufunc calls with explicit outputs are not rewritten. """ ns = self._test_explicit_output_function(explicit_output) self._assert_no_rewrite(ns.control_pipeline.state.func_ir.blocks, ns.test_pipeline.state.func_ir.blocks)
TestArrayExpressions
python
networkx__networkx
networkx/classes/tests/test_graphviews.py
{ "start": 5202, "end": 11496 }
class ____: @classmethod def setup_class(cls): cls.G = nx.path_graph(9) cls.DG = nx.path_graph(9, create_using=nx.DiGraph()) cls.MG = nx.path_graph(9, create_using=nx.MultiGraph()) cls.MDG = nx.path_graph(9, create_using=nx.MultiDiGraph()) cls.Gv = nx.to_undirected(cls.DG) cls.DGv = nx.to_directed(cls.G) cls.MGv = nx.to_undirected(cls.MDG) cls.MDGv = nx.to_directed(cls.MG) cls.Rv = cls.DG.reverse() cls.MRv = cls.MDG.reverse() cls.graphs = [ cls.G, cls.DG, cls.MG, cls.MDG, cls.Gv, cls.DGv, cls.MGv, cls.MDGv, cls.Rv, cls.MRv, ] for G in cls.graphs: G.edges, G.nodes, G.degree def test_pickle(self): import pickle for G in self.graphs: H = pickle.loads(pickle.dumps(G, -1)) assert edges_equal(H.edges, G.edges, directed=G.is_directed()) assert nodes_equal(H.nodes, G.nodes) def test_subgraph_of_subgraph(self): SGv = nx.subgraph(self.G, range(3, 7)) SDGv = nx.subgraph(self.DG, range(3, 7)) SMGv = nx.subgraph(self.MG, range(3, 7)) SMDGv = nx.subgraph(self.MDG, range(3, 7)) for G in self.graphs + [SGv, SDGv, SMGv, SMDGv]: SG = nx.induced_subgraph(G, [4, 5, 6]) assert list(SG) == [4, 5, 6] SSG = SG.subgraph([6, 7]) assert list(SSG) == [6] # subgraph-subgraph chain is short-cut in base class method assert SSG._graph is G def test_restricted_induced_subgraph_chains(self): """Test subgraph chains that both restrict and show nodes/edges. A restricted_view subgraph should allow induced subgraphs using G.subgraph that automagically without a chain (meaning the result is a subgraph view of the original graph not a subgraph-of-subgraph. 
""" hide_nodes = [3, 4, 5] hide_edges = [(6, 7)] RG = nx.restricted_view(self.G, hide_nodes, hide_edges) nodes = [4, 5, 6, 7, 8] SG = nx.induced_subgraph(RG, nodes) SSG = RG.subgraph(nodes) assert RG._graph is self.G assert SSG._graph is self.G assert SG._graph is RG assert edges_equal(SG.edges, SSG.edges) # should be same as morphing the graph CG = self.G.copy() CG.remove_nodes_from(hide_nodes) CG.remove_edges_from(hide_edges) assert edges_equal(CG.edges(nodes), SSG.edges) CG.remove_nodes_from([0, 1, 2, 3]) assert edges_equal(CG.edges, SSG.edges) # switch order: subgraph first, then restricted view SSSG = self.G.subgraph(nodes) RSG = nx.restricted_view(SSSG, hide_nodes, hide_edges) assert RSG._graph is not self.G assert edges_equal(RSG.edges, CG.edges) def test_subgraph_copy(self): for origG in self.graphs: G = nx.Graph(origG) SG = G.subgraph([4, 5, 6]) H = SG.copy() assert type(G) is type(H) def test_subgraph_todirected(self): SG = nx.induced_subgraph(self.G, [4, 5, 6]) SSG = SG.to_directed() assert sorted(SSG) == [4, 5, 6] assert sorted(SSG.edges) == [(4, 5), (5, 4), (5, 6), (6, 5)] def test_subgraph_toundirected(self): SG = nx.induced_subgraph(self.G, [4, 5, 6]) SSG = SG.to_undirected() assert list(SSG) == [4, 5, 6] assert sorted(SSG.edges) == [(4, 5), (5, 6)] def test_reverse_subgraph_toundirected(self): G = self.DG.reverse(copy=False) SG = G.subgraph([4, 5, 6]) SSG = SG.to_undirected() assert list(SSG) == [4, 5, 6] assert sorted(SSG.edges) == [(4, 5), (5, 6)] def test_reverse_reverse_copy(self): G = self.DG.reverse(copy=False) H = G.reverse(copy=True) assert H.nodes == self.DG.nodes assert H.edges == self.DG.edges G = self.MDG.reverse(copy=False) H = G.reverse(copy=True) assert H.nodes == self.MDG.nodes assert H.edges == self.MDG.edges def test_subgraph_edgesubgraph_toundirected(self): G = self.G.copy() SG = G.subgraph([4, 5, 6]) SSG = SG.edge_subgraph([(4, 5), (5, 4)]) USSG = SSG.to_undirected() assert list(USSG) == [4, 5] assert sorted(USSG.edges) == [(4, 
5)] def test_copy_subgraph(self): G = self.G.copy() SG = G.subgraph([4, 5, 6]) CSG = SG.copy(as_view=True) DCSG = SG.copy(as_view=False) assert hasattr(CSG, "_graph") # is a view assert not hasattr(DCSG, "_graph") # not a view def test_copy_disubgraph(self): G = self.DG.copy() SG = G.subgraph([4, 5, 6]) CSG = SG.copy(as_view=True) DCSG = SG.copy(as_view=False) assert hasattr(CSG, "_graph") # is a view assert not hasattr(DCSG, "_graph") # not a view def test_copy_multidisubgraph(self): G = self.MDG.copy() SG = G.subgraph([4, 5, 6]) CSG = SG.copy(as_view=True) DCSG = SG.copy(as_view=False) assert hasattr(CSG, "_graph") # is a view assert not hasattr(DCSG, "_graph") # not a view def test_copy_multisubgraph(self): G = self.MG.copy() SG = G.subgraph([4, 5, 6]) CSG = SG.copy(as_view=True) DCSG = SG.copy(as_view=False) assert hasattr(CSG, "_graph") # is a view assert not hasattr(DCSG, "_graph") # not a view def test_copy_of_view(self): G = nx.MultiGraph(self.MGv) assert G.__class__.__name__ == "MultiGraph" G = G.copy(as_view=True) assert G.__class__.__name__ == "MultiGraph" def test_subclass(self): class MyGraph(nx.DiGraph): def my_method(self): return "me" def to_directed_class(self): return MyGraph() for origG in self.graphs: G = MyGraph(origG) SG = G.subgraph([4, 5, 6]) H = SG.copy() assert SG.my_method() == "me" assert H.my_method() == "me" assert 3 not in H or 3 in SG
TestChainsOfViews
python
keras-team__keras
keras/src/utils/summary_utils_test.py
{ "start": 231, "end": 4917 }
class ____(testing.TestCase): @parameterized.parameters([("adam",), (None,)]) @pytest.mark.requires_trainable_backend def test_print_model_summary(self, optimizer): inputs = layers.Input((2,)) outputs = layers.Dense(3)(inputs) model = models.Model(inputs, outputs) model.compile(optimizer=optimizer, loss="mse", metrics=["mse"]) if optimizer: # Trigger the optimizer weights creation model.fit(x=np.zeros([4, 2]), y=np.zeros([4, 3])) summary_content = [] def print_to_variable(text, line_break=False): summary_content.append(text) try: summary_utils.print_summary(model, print_fn=print_to_variable) summary_content = "\n".join(summary_content) if optimizer: self.assertIn("Total params: 29", summary_content) self.assertIn("Trainable params: 9", summary_content) self.assertIn("Non-trainable params: 0", summary_content) self.assertIn("Optimizer params: 20", summary_content) else: self.assertIn("Total params: 9", summary_content) self.assertIn("Trainable params: 9", summary_content) self.assertIn("Non-trainable params: 0", summary_content) self.assertNotIn("Optimizer params", summary_content) except ImportError: pass def test_print_model_summary_custom_build(self): class MyModel(models.Model): def __init__(self): super().__init__() self.dense1 = layers.Dense(4, activation="relu") self.dense2 = layers.Dense(2, activation="softmax") self.unbuilt_dense = layers.Dense(1) def build(self, input_shape): self.dense1.build(input_shape) input_shape = self.dense1.compute_output_shape(input_shape) self.dense2.build(input_shape) def call(self, inputs): x = self.dense1(inputs) return self.dense2(x) model = MyModel() model.build((None, 2)) summary_content = [] def print_to_variable(text, line_break=False): summary_content.append(text) summary_utils.print_summary(model, print_fn=print_to_variable) summary_content = "\n".join(summary_content) self.assertIn("(None, 4)", summary_content) # dense1 self.assertIn("(None, 2)", summary_content) # dense2 self.assertIn("?", summary_content) # 
unbuilt_dense self.assertIn("Total params: 22", summary_content) self.assertIn("Trainable params: 22", summary_content) self.assertIn("Non-trainable params: 0", summary_content) def test_print_model_summary_op_as_layer(self): inputs = layers.Input((2,)) x = layers.Dense(4)(inputs) outputs = ops.mean(x) model = models.Model(inputs, outputs) summary_content = [] def print_to_variable(text, line_break=False): summary_content.append(text) summary_utils.print_summary( model, print_fn=print_to_variable, show_trainable=True ) summary_content = "\n".join(summary_content) self.assertIn("(None, 4)", summary_content) # dense self.assertIn("Y", summary_content) # dense self.assertIn("()", summary_content) # mean self.assertIn("-", summary_content) # mean self.assertIn("Total params: 12", summary_content) self.assertIn("Trainable params: 12", summary_content) self.assertIn("Non-trainable params: 0", summary_content) def test_print_model_summary_with_mha(self): # In Keras <= 3.6, MHA exposes `output_shape` property which breaks this # test. class MyModel(models.Model): def __init__(self): super().__init__() self.mha = layers.MultiHeadAttention(2, 2, output_shape=(4,)) def call(self, inputs): return self.mha(inputs, inputs, inputs) model = MyModel() model(np.ones((1, 2, 2))) summary_content = [] def print_to_variable(text, line_break=False): summary_content.append(text) summary_utils.print_summary(model, print_fn=print_to_variable) summary_content = "\n".join(summary_content) self.assertIn("(1, 2, 4)", summary_content) # mha self.assertIn("Total params: 56", summary_content) self.assertIn("Trainable params: 56", summary_content) self.assertIn("Non-trainable params: 0", summary_content)
SummaryUtilsTest
python
pandas-dev__pandas
pandas/core/nanops.py
{ "start": 1184, "end": 2346 }
class ____: def __init__(self, *dtypes: Dtype) -> None: super().__init__() self.dtypes = tuple(pandas_dtype(dtype).type for dtype in dtypes) def check(self, obj) -> bool: return hasattr(obj, "dtype") and issubclass(obj.dtype.type, self.dtypes) def __call__(self, f: F) -> F: @functools.wraps(f) def _f(*args, **kwargs): obj_iter = itertools.chain(args, kwargs.values()) if any(self.check(obj) for obj in obj_iter): f_name = f.__name__.replace("nan", "") raise TypeError( f"reduction operation '{f_name}' not allowed for this dtype" ) try: return f(*args, **kwargs) except ValueError as e: # we want to transform an object array # ValueError message to the more typical TypeError # e.g. this is normally a disallowed function on # object arrays that contain strings if is_object_dtype(args[0]): raise TypeError(e) from e raise return cast(F, _f)
disallow
python
openai__openai-python
src/openai/types/realtime/realtime_audio_formats.py
{ "start": 551, "end": 679 }
class ____(BaseModel): type: Optional[Literal["audio/pcmu"]] = None """The audio format. Always `audio/pcmu`."""
AudioPCMU
python
apache__airflow
providers/celery/tests/integration/celery/test_celery_executor.py
{ "start": 3830, "end": 11787 }
class ____: def setup_method(self) -> None: db.clear_db_runs() db.clear_db_jobs() def teardown_method(self) -> None: db.clear_db_runs() db.clear_db_jobs() @pytest.mark.flaky(reruns=3) @pytest.mark.parametrize("broker_url", _prepare_test_bodies()) @pytest.mark.parametrize( "executor_config", [ pytest.param({}, id="no_executor_config"), pytest.param( { "pod_override": k8s.V1Pod( spec=k8s.V1PodSpec( containers=[ k8s.V1Container( name="base", resources=k8s.V1ResourceRequirements( requests={ "cpu": "100m", "memory": "384Mi", }, limits={ "cpu": 1, "memory": "500Mi", }, ), ) ] ) ) }, id="pod_override_executor_config", ), ], ) def test_celery_integration(self, broker_url, executor_config): from airflow.providers.celery.executors import celery_executor, celery_executor_utils def fake_execute_workload(command): if "fail" in command: raise AirflowException("fail") with _prepare_app(broker_url, execute=fake_execute_workload) as app: executor = celery_executor.CeleryExecutor() assert executor.tasks == {} executor.start() with start_worker(app=app, logfile=sys.stdout, loglevel="info"): ti = workloads.TaskInstance.model_construct( id=uuid7(), task_id="success", dag_id="id", run_id="abc", try_number=0, priority_weight=1, queue=celery_executor_utils.celery_configuration["task_default_queue"], executor_config=executor_config, ) keys = [ TaskInstanceKey("id", "success", "abc", 0, -1), TaskInstanceKey("id", "fail", "abc", 0, -1), ] for w in ( workloads.ExecuteTask.model_construct(ti=ti), workloads.ExecuteTask.model_construct(ti=ti.model_copy(update={"task_id": "fail"})), ): executor.queue_workload(w, session=None) executor.trigger_tasks(open_slots=10) for _ in range(20): num_tasks = len(executor.tasks.keys()) if num_tasks == 2: break logger.info( "Waiting 0.1 s for tasks to be processed asynchronously. 
Processed so far %d", num_tasks, ) sleep(0.4) assert list(executor.tasks.keys()) == keys assert executor.event_buffer[keys[0]][0] == State.QUEUED assert executor.event_buffer[keys[1]][0] == State.QUEUED executor.end(synchronous=True) assert executor.event_buffer[keys[0]][0] == State.SUCCESS assert executor.event_buffer[keys[1]][0] == State.FAILED assert keys[0] not in executor.tasks assert keys[1] not in executor.tasks assert executor.queued_tasks == {} def test_error_sending_task(self): from airflow.providers.celery.executors import celery_executor def fake_task(): pass with _prepare_app(execute=fake_task): # fake_execute_command takes no arguments while execute_workload takes 1, # which will cause TypeError when calling task.apply_async() executor = celery_executor.CeleryExecutor() task = BashOperator( task_id="test", bash_command="true", dag=DAG(dag_id="dag_id"), start_date=datetime.now(), ) if AIRFLOW_V_3_0_PLUS: ti = TaskInstance(task=task, run_id="abc", dag_version_id=uuid6.uuid7()) else: ti = TaskInstance(task=task, run_id="abc") workload = workloads.ExecuteTask.model_construct( ti=workloads.TaskInstance.model_validate(ti, from_attributes=True), ) key = (task.dag.dag_id, task.task_id, ti.run_id, 0, -1) executor.queued_tasks[key] = workload executor.task_publish_retries[key] = 1 executor.heartbeat() assert len(executor.queued_tasks) == 0, "Task should no longer be queued" assert executor.event_buffer[key][0] == State.FAILED def test_retry_on_error_sending_task(self, caplog): """Test that Airflow retries publishing tasks to Celery Broker at least 3 times""" from airflow.providers.celery.executors import celery_executor, celery_executor_utils with ( _prepare_app(), caplog.at_level(logging.INFO), mock.patch.object( # Mock `with timeout()` to _instantly_ fail. 
celery_executor_utils.timeout, "__enter__", side_effect=AirflowTaskTimeout, ), ): executor = celery_executor.CeleryExecutor() assert executor.task_publish_retries == {} assert executor.task_publish_max_retries == 3, "Assert Default Max Retries is 3" task = BashOperator( task_id="test", bash_command="true", dag=DAG(dag_id="id"), start_date=datetime.now(), ) if AIRFLOW_V_3_0_PLUS: ti = TaskInstance(task=task, run_id="abc", dag_version_id=uuid6.uuid7()) else: ti = TaskInstance(task=task, run_id="abc") workload = workloads.ExecuteTask.model_construct( ti=workloads.TaskInstance.model_validate(ti, from_attributes=True), ) key = (task.dag.dag_id, task.task_id, ti.run_id, 0, -1) executor.queued_tasks[key] = workload # Test that when heartbeat is called again, task is published again to Celery Queue executor.heartbeat() assert dict(executor.task_publish_retries) == {key: 1} assert len(executor.queued_tasks) == 1, "Task should remain in queue" assert executor.event_buffer == {} assert f"[Try 1 of 3] Task Timeout Error for Task: ({key})." in caplog.text executor.heartbeat() assert dict(executor.task_publish_retries) == {key: 2} assert len(executor.queued_tasks) == 1, "Task should remain in queue" assert executor.event_buffer == {} assert f"[Try 2 of 3] Task Timeout Error for Task: ({key})." in caplog.text executor.heartbeat() assert dict(executor.task_publish_retries) == {key: 3} assert len(executor.queued_tasks) == 1, "Task should remain in queue" assert executor.event_buffer == {} assert f"[Try 3 of 3] Task Timeout Error for Task: ({key})." in caplog.text executor.heartbeat() assert dict(executor.task_publish_retries) == {} assert len(executor.queued_tasks) == 0, "Task should no longer be in queue" assert executor.event_buffer[key][0] == State.FAILED
TestCeleryExecutor
python
yandexdataschool__Practical_RL
week06_policy_based/atari_wrappers.py
{ "start": 7292, "end": 7831 }
class ____(ObservationWrapper): """ Image shape to num_channels x height x width and normalization """ def __init__(self, env): super().__init__(env) old_shape = self.observation_space.shape self.observation_space = Box( low=0.0, high=1.0, shape=(old_shape[-1], old_shape[0], old_shape[1]), dtype=np.float32, ) def observation(self, observation): return np.transpose(observation, (2, 0, 1)).astype(np.float32) / 255.0
SwapImageAxes
python
pyparsing__pyparsing
examples/adventureEngine.py
{ "start": 7605, "end": 8494 }
class ____(Command): def __init__(self, quals): super().__init__("DOORS", "looking for doors") @staticmethod def help_description(): return "DOORS - display what doors are visible from this room" def _do_command(self, player): rm = player.room numDoors = sum(1 for r in rm.doors if r is not None) if numDoors == 0: reply = "There are no doors in any direction." else: if numDoors == 1: reply = "There is a door to the " else: reply = "There are doors to the " doorNames = [ {0: "north", 1: "south", 2: "east", 3: "west"}[i] for i, d in enumerate(rm.doors) if d is not None ] reply += enumerate_doors(doorNames) reply += "." print(reply)
DoorsCommand
python
pypa__pipenv
pipenv/vendor/pipdeptree/_cli.py
{ "start": 279, "end": 606 }
class ____(Namespace): freeze: bool python: str all: bool local_only: bool user_only: bool warn: WarningType reverse: bool packages: str exclude: str json: bool json_tree: bool mermaid: bool output_format: str | None depth: float encoding: str license: bool
Options
python
marshmallow-code__apispec
tests/schemas.py
{ "start": 1233, "end": 1655 }
class ____(Schema): number_auto_default = fields.Int(load_default=12) number_manual_default = fields.Int(load_default=12, metadata={"default": 42}) string_callable_default = fields.Str(load_default=lambda: "Callable") string_manual_default = fields.Str( load_default=lambda: "Callable", metadata={"default": "Manual"} ) numbers = fields.List(fields.Int, load_default=list)
DefaultValuesSchema
python
langchain-ai__langchain
libs/core/langchain_core/structured_query.py
{ "start": 4400, "end": 5123 }
class ____(Expr): """Structured query.""" query: str """Query string.""" filter: FilterDirective | None """Filtering expression.""" limit: int | None """Limit on the number of results.""" def __init__( self, query: str, filter: FilterDirective | None, # noqa: A002 limit: int | None = None, **kwargs: Any, ) -> None: """Create a StructuredQuery. Args: query: The query string. filter: The filtering expression. limit: The limit on the number of results. """ # super exists from BaseModel super().__init__(query=query, filter=filter, limit=limit, **kwargs)
StructuredQuery
python
pytorch__pytorch
torch/distributed/elastic/multiprocessing/subprocess_handler/subprocess_handler.py
{ "start": 752, "end": 2697 }
class ____: """ Convenience wrapper around python's ``subprocess.Popen``. Keeps track of meta-objects associated to the process (e.g. stdout and stderr redirect fds). """ def __init__( self, entrypoint: str, args: tuple, env: dict[str, str], stdout: str | None, stderr: str | None, local_rank_id: int, numa_options: NumaOptions | None, ): self._stdout = open(stdout, "w") if stdout else None self._stderr = open(stderr, "w") if stderr else None # inherit parent environment vars env_vars = os.environ.copy() env_vars.update(env) args_str = (entrypoint, *[str(e) for e in args]) args_str = maybe_wrap_command_args_with_numa_binding( args_str, gpu_index=local_rank_id, numa_options=numa_options, ) self.local_rank_id = local_rank_id self.proc: Popen = self._popen(args_str, env_vars) def _popen(self, args: tuple, env: dict[str, str]) -> Popen: kwargs: dict[str, Any] = {} if not IS_WINDOWS: kwargs["start_new_session"] = True return Popen( # pyre-fixme[6]: Expected `Union[typing.Sequence[Union[_PathLike[bytes], # _PathLike[str], bytes, str]], bytes, str]` for 1st param but got # `Tuple[str, *Tuple[Any, ...]]`. args=args, env=env, stdout=self._stdout, stderr=self._stderr, **kwargs, ) def close(self, death_sig: signal.Signals | None = None) -> None: if not death_sig: death_sig = _get_default_signal() if IS_WINDOWS: self.proc.send_signal(death_sig) else: os.killpg(self.proc.pid, death_sig) if self._stdout: self._stdout.close() if self._stderr: self._stderr.close()
SubprocessHandler
python
doocs__leetcode
lcci/17.04.Missing Number/Solution3.py
{ "start": 0, "end": 169 }
class ____: def missingNumber(self, nums: List[int]) -> int: ans = 0 for i, x in enumerate(nums, 1): ans ^= i ^ x return ans
Solution
python
jazzband__django-oauth-toolkit
tests/common_testing.py
{ "start": 742, "end": 894 }
class ____: @classmethod def setUpClass(cls): cls.databases = retrieve_current_databases() super().setUpClass()
OAuth2ProviderBase
python
allegroai__clearml
clearml/backend_api/services/v2_20/models.py
{ "start": 51030, "end": 52231 }
class ____(Response): """ Response of models.delete_metadata endpoint. :param updated: Number of models updated (0 or 1) :type updated: int """ _service = "models" _action = "delete_metadata" _version = "2.20" _schema = { "definitions": {}, "properties": { "updated": { "description": "Number of models updated (0 or 1)", "enum": [0, 1], "type": ["integer", "null"], } }, "type": "object", } def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None: super(DeleteMetadataResponse, self).__init__(**kwargs) self.updated = updated @schema_property("updated") def updated(self) -> Optional[int]: return self._property_updated @updated.setter def updated(self, value: Optional[int]) -> None: if value is None: self._property_updated = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "updated", six.integer_types) self._property_updated = value
DeleteMetadataResponse
python
coleifer__peewee
tests/sqlcipher_ext.py
{ "start": 3678, "end": 5559 }
class ____(CleanUpModelTestCase): database = ext_db requires = [Note] def setUp(self): super(SqlCipherExtTestCase, self).setUp() FTSNote._meta.database = ext_db FTSNote.drop_table(True) FTSNote.create_table(tokenize='porter', content=Note.content) def tearDown(self): FTSNote.drop_table(True) super(SqlCipherExtTestCase, self).tearDown() def test_fts(self): strings = [ 'python and peewee for working with databases', 'relational databases are the best', 'sqlite is the best relational database', 'sqlcipher is a cool database extension'] for s in strings: Note.create(content=s) FTSNote.rebuild() query = (FTSNote .select(FTSNote, FTSNote.rank().alias('score')) .where(FTSNote.match('relational databases')) .order_by(SQL('score').desc())) notes = [note.content for note in query] self.assertEqual(notes, [ 'relational databases are the best', 'sqlite is the best relational database']) alt_conn = SqliteDatabase(ext_db.database) self.assertRaises( DatabaseError, alt_conn.execute_sql, 'SELECT * FROM "%s"' % (FTSNote._meta.table_name)) def test_func(self): Note.create(content='hello') Note.create(content='baz') Note.create(content='nug') query = (Note .select(Note.content, fn.shazam(Note.content).alias('shz')) .order_by(Note.id) .dicts()) results = list(query) self.assertEqual(results, [ {'content': 'hello', 'shz': 'aaf4c'}, {'content': 'baz', 'shz': 'bbe96'}, {'content': 'nug', 'shz': '52616'}, ])
SqlCipherExtTestCase
python
scipy__scipy
scipy/stats/_continuous_distns.py
{ "start": 202519, "end": 206996 }
class ____(rv_continuous): r"""A log gamma continuous random variable. %(before_notes)s Notes ----- The probability density function for `loggamma` is: .. math:: f(x, c) = \frac{\exp(c x - \exp(x))} {\Gamma(c)} for all :math:`x, c > 0`. Here, :math:`\Gamma` is the gamma function (`scipy.special.gamma`). `loggamma` takes ``c`` as a shape parameter for :math:`c`. %(after_notes)s %(example)s """ def _shape_info(self): return [_ShapeInfo("c", False, (0, np.inf), (False, False))] def _rvs(self, c, size=None, random_state=None): # Use the property of the gamma distribution Gamma(c) # Gamma(c) ~ Gamma(c + 1)*U**(1/c), # where U is uniform on [0, 1]. (See, e.g., # G. Marsaglia and W.W. Tsang, "A simple method for generating gamma # variables", https://doi.org/10.1145/358407.358414) # So # log(Gamma(c)) ~ log(Gamma(c + 1)) + log(U)/c # Generating a sample with this formulation is a bit slower # than the more obvious log(Gamma(c)), but it avoids loss # of precision when c << 1. return (np.log(random_state.gamma(c + 1, size=size)) + np.log(random_state.uniform(size=size))/c) def _pdf(self, x, c): # loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c) return np.exp(c*x-np.exp(x)-sc.gammaln(c)) def _logpdf(self, x, c): return c*x - np.exp(x) - sc.gammaln(c) def _cdf(self, x, c): # This function is gammainc(c, exp(x)), where gammainc(c, z) is # the regularized incomplete gamma function. # The first term in a series expansion of gamminc(c, z) is # z**c/Gamma(c+1); see 6.5.29 of Abramowitz & Stegun (and refer # back to 6.5.1, 6.5.2 and 6.5.4 for the relevant notation). # This can also be found in the wikipedia article # https://en.wikipedia.org/wiki/Incomplete_gamma_function. # Here we use that formula when x is sufficiently negative that # exp(x) will result in subnormal numbers and lose precision. # We evaluate the log of the expression first to allow the possible # cancellation of the terms in the division, and then exponentiate. 
# That is, # exp(x)**c/Gamma(c+1) = exp(log(exp(x)**c/Gamma(c+1))) # = exp(c*x - gammaln(c+1)) return xpx.apply_where( x < _LOGXMIN, (x, c), lambda x, c: np.exp(c*x - sc.gammaln(c+1)), lambda x, c: sc.gammainc(c, np.exp(x))) def _ppf(self, q, c): # The expression used when g < _XMIN inverts the one term expansion # given in the comments of _cdf(). g = sc.gammaincinv(c, q) return xpx.apply_where( g < _XMIN, (g, q, c), lambda g, q, c: (np.log(q) + sc.gammaln(c+1))/c, lambda g, q, c: np.log(g)) def _sf(self, x, c): # See the comments for _cdf() for how x < _LOGXMIN is handled. return xpx.apply_where( x < _LOGXMIN, (x, c), lambda x, c: -np.expm1(c*x - sc.gammaln(c+1)), lambda x, c: sc.gammaincc(c, np.exp(x))) def _isf(self, q, c): # The expression used when g < _XMIN inverts the complement of # the one term expansion given in the comments of _cdf(). g = sc.gammainccinv(c, q) return xpx.apply_where( g < _XMIN, (g, q, c), lambda g, q, c: (np.log1p(-q) + sc.gammaln(c+1))/c, lambda g, q, c: np.log(g)) def _stats(self, c): # See, for example, "A Statistical Study of Log-Gamma Distribution", by # Ping Shing Chan (thesis, McMaster University, 1993). mean = sc.digamma(c) var = sc.polygamma(1, c) skewness = sc.polygamma(2, c) / np.power(var, 1.5) excess_kurtosis = sc.polygamma(3, c) / (var*var) return mean, var, skewness, excess_kurtosis def _entropy(self, c): def regular(c): h = sc.gammaln(c) - c * sc.digamma(c) + c return h def asymptotic(c): # using asymptotic expansions for gammaln and psi (see gh-18093) term = -0.5*np.log(c) + c**-1./6 - c**-3./90 + c**-5./210 h = norm._entropy() + term return h return xpx.apply_where(c >= 45, c, asymptotic, regular) loggamma = loggamma_gen(name='loggamma')
loggamma_gen
python
astropy__astropy
astropy/units/tests/test_quantity_non_ufuncs.py
{ "start": 8966, "end": 13976 }
class ____(InvariantUnitTestSetup): def test_copy(self): self.check(np.copy) # Also as kwarg copy = np.copy(a=self.q) assert_array_equal(copy, self.q) @pytest.mark.skipif(not NUMPY_LT_2_0, reason="np.asfarray is removed in NumPy 2.0") def test_asfarray(self): self.check(np.asfarray) # noqa: NPY201 farray = np.asfarray(a=self.q) # noqa: NPY201 assert_array_equal(farray, self.q) def test_empty_like(self): o = np.empty_like(self.q) assert o.shape == (3, 3) assert isinstance(o, u.Quantity) assert o.unit == self.q.unit o2 = np.empty_like(prototype=self.q) assert o2.shape == (3, 3) assert isinstance(o2, u.Quantity) assert o2.unit == self.q.unit o3 = np.empty_like(self.q, subok=False) assert type(o3) is np.ndarray def test_zeros_like(self): self.check(np.zeros_like) o2 = np.zeros_like(a=self.q) assert_array_equal(o2, self.q * 0.0) def test_ones_like(self): self.check(np.ones_like) def test_full_like(self): o = np.full_like(self.q, 0.5 * u.km) expected = np.empty_like(self.q.value) * u.m expected[...] 
= 0.5 * u.km assert np.all(o == expected) with pytest.raises(u.UnitsError): np.full_like(self.q, 0.5 * u.s) if not NUMPY_LT_2_0: def test_astype(self): int32q = self.q.astype("int32") assert_array_equal(np.astype(int32q, "int32"), int32q) @pytest.mark.parametrize( "args, kwargs, expected", [ pytest.param( (1 * u.radian,), {}, np.arange(1, dtype=float), id="pos: stop", ), pytest.param( (0 * u.degree, 1 * u.radian), {}, np.arange(1, dtype=float), id="pos: start, stop", ), pytest.param( (0 * u.degree, 1 * u.radian, 1 * u.arcsec), {}, np.arange(ARCSEC_PER_RADIAN, dtype=float), id="pos: start, stop, step", ), pytest.param( (0 * u.degree, 1 * u.radian), {"step": 1 * u.arcsec}, np.arange(ARCSEC_PER_RADIAN, dtype=float), id="pos: start, stop; kw: step", ), pytest.param( (0 * u.radian,), {"stop": 5 * u.radian}, np.rad2deg(np.arange(5, dtype=float) * ARCSEC_PER_DEGREE), id="pos: start; kw: stop", ), pytest.param( (10,), {"step": 2}, np.arange(10, step=2, dtype=float) * ARCSEC_PER_DEGREE, id="pos: stop; kw: step; unit from like", ), pytest.param( (10 * u.radian, None), {}, np.rad2deg(np.arange(10, dtype=float) * ARCSEC_PER_DEGREE), id="pos: stop, None", ), pytest.param( (10 * u.radian, None, 1), {}, np.rad2deg(np.arange(10, dtype=float) * ARCSEC_PER_DEGREE), id="pos: stop, None, 1", ), ], ) def test_arange(self, args, kwargs, expected): arr = np.arange(*args, **kwargs, like=u.Quantity([], u.degree)) assert type(arr) is u.Quantity if any(hasattr(arg, "unit") for arg in itertools.chain(args, kwargs.keys())): expected_unit = u.radian else: expected_unit = u.degree assert arr.unit == expected_unit assert arr.dtype == expected.dtype assert_allclose(arr.to_value(u.arcsec), expected) def test_arange_like_quantity_subclass(self): class AngularUnits(u.SpecificTypeQuantity): _equivalent_unit = u.radian arr = np.arange( 0 * u.radian, 10 * u.radian, 1 * u.radian, like=AngularUnits([], u.radian) ) assert type(arr) is AngularUnits assert arr.unit == u.radian assert arr.dtype == 
np.dtype(float) assert_array_equal(arr.value, np.arange(10)) def test_arange_default_unit(self): arr = np.arange(10, like=u.Quantity([], u.s)) assert type(arr) is u.Quantity assert arr.unit == u.s def test_arange_invalid_inputs(self): with pytest.raises( TypeError, match="stop without a unit cannot be combined with start or step", ): np.arange(0 * u.radian, 10, like=u.Quantity([], u.s)) def test_arange_unit_from_stop(self): Q = 1 * u.km start = 1 * u.s stop = 10 * u.min a = np.arange(start, stop, like=Q) b = np.arange(start, stop=stop, like=Q) assert a.unit == stop.unit assert b.unit == stop.unit assert_array_equal(a.value, b.value)
TestCopyAndCreation
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 870613, "end": 872267 }
class ____(sgqlc.types.Type): """This aggregates pull requests opened by a user within one repository. """ __schema__ = github_schema __field_names__ = ("contributions", "repository") contributions = sgqlc.types.Field( sgqlc.types.non_null(CreatedPullRequestContributionConnection), graphql_name="contributions", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ("order_by", sgqlc.types.Arg(ContributionOrder, graphql_name="orderBy", default={"direction": "DESC"})), ) ), ) """The pull request contributions. Arguments: * `after` (`String`): Returns the elements in the list that come after the specified cursor. * `before` (`String`): Returns the elements in the list that come before the specified cursor. * `first` (`Int`): Returns the first _n_ elements from the list. * `last` (`Int`): Returns the last _n_ elements from the list. * `order_by` (`ContributionOrder`): Ordering options for contributions returned from the connection. (default: `{direction: DESC}`) """ repository = sgqlc.types.Field(sgqlc.types.non_null("Repository"), graphql_name="repository") """The repository in which the pull requests were opened."""
PullRequestContributionsByRepository
python
dagster-io__dagster
python_modules/dagster/dagster/components/core/defs_module.py
{ "start": 1882, "end": 2024 }
class ____(BaseModel): """Describes dependencies for a component to load.""" env: Optional[list[str]] = None
ComponentRequirementsModel
python
huggingface__transformers
src/transformers/models/doge/modeling_doge.py
{ "start": 11191, "end": 17546 }
class ____(nn.Module): def __init__(self, config: DogeConfig, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.keep_window_size = config.keep_window_size self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias ) self.k_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.v_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) # dynamic mask for the QK^T attention weights matrix self.A = nn.Parameter(torch.zeros(config.num_key_value_heads)) self.dt_proj = nn.Linear( config.num_key_value_heads * self.head_dim, config.num_key_value_heads, bias=config.attention_bias ) self.o_proj = nn.Linear( config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias ) self.q_norm = DogeRMSNorm(self.head_dim, eps=config.rms_norm_eps) self.k_norm = DogeRMSNorm(self.head_dim, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2) key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2) value_states = 
self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) # calculate dynamic mask from value_states dt_states = self.dt_proj( value_states.transpose(1, 2).reshape(value_states.shape[0], value_states.shape[-2], -1) ) dt_states = torch.exp(self.A * F.softplus(dt_states)).transpose(-1, -2) attn_mask = self.prepare_dynamic_mask( hidden_states=hidden_states, dt_states=dt_states, keep_window_size=self.keep_window_size, attention_mask=attention_mask, ) attn_mask = repeat_kv(attn_mask, self.num_key_value_groups) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask=attn_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights def prepare_dynamic_mask( self, hidden_states: torch.Tensor, dt_states: torch.Tensor, keep_window_size: int = 2048, attention_mask: Optional[torch.Tensor] = None, ): """ The core idea of DMA is to calculate the dynamic attention mask to mask the tokens that should be masked, so as to form sparse attention. Combine `dt_states` with `attention_mask` to generate the final `attn_mask`. Args: hidden_states (`torch.Tensor`): The input hidden_states, used to determine the minimum value of the current input precision. 
dt_states (`torch.Tensor`): dt_states of shape `(batch_size, num_heads, key_sequence_length)`. keep_window_size (`int`): The window size of tokens that are not dynamically masked, and dynamic masking is only performed when the sequence length exceeds this value. attention_mask (`torch.Tensor`, *optional*): attention mask of shape `(batch_size, 1, query_sequence_length, key_sequence_length)`. """ min_dtype = torch.finfo(hidden_states.dtype).min dtype = hidden_states.dtype attn_mask = dt_states[:, :, None, :].expand( -1, -1, hidden_states.shape[1], -1 ) # [batch_size, num_heads, query_len, key_len] if attention_mask is not None and not isinstance(attention_mask, BlockMask): if attention_mask.dtype == torch.bool: dtype = hidden_states.dtype attention_mask = torch.where( attention_mask, torch.tensor(0.0, device=attention_mask.device, dtype=dtype), min_dtype ) attn_mask = attn_mask.masked_fill(attention_mask[:, :, :, : attn_mask.shape[-1]] != 0, min_dtype) if attn_mask.shape[-1] > keep_window_size: active_mask = torch.zeros_like(attn_mask, dtype=dtype, device=attn_mask.device) topk_indices = torch.topk(attn_mask, keep_window_size, dim=-1, largest=True, sorted=False).indices active_mask = active_mask.scatter(-1, topk_indices, 1.0) attn_mask = attn_mask.masked_fill(active_mask == 0.0, min_dtype) return attn_mask
DogeAttention
python
openai__gym
tests/wrappers/test_video_recorder.py
{ "start": 136, "end": 356 }
class ____(gym.Env): metadata = {"render_modes": ["rgb_array_list"]} def __init__(self, render_mode="rgb_array_list"): self.render_mode = render_mode def render(self): pass
BrokenRecordableEnv
python
readthedocs__readthedocs.org
readthedocs/filetreediff/dataclasses.py
{ "start": 2144, "end": 3146 }
class ____: def __init__( self, path: str, status: FileTreeDiffFileStatus, diff: "FileTreeDiff", ): self.path = path self.status = status self.current_version = diff.current_version self.current_version_build = diff.current_version_build self.base_version = diff.base_version self.base_version_build = diff.base_version_build self._resolver = diff._resolver @property def url(self): """URL to the file in the current version.""" return self._resolver.resolve_version( project=self.current_version.project, version=self.current_version, filename=self.path, ) @property def base_url(self): """URL to the file in the base version.""" return self._resolver.resolve_version( project=self.base_version.project, version=self.base_version, filename=self.path, )
FileTreeDiffFile
python
great-expectations__great_expectations
great_expectations/core/expectation_diagnostics/supporting_types.py
{ "start": 3126, "end": 3453 }
class ____(SerializableDictDot): """Captures information about a specific Renderer within an Expectation. Used within the ExpectationDiagnostic object.""" # noqa: E501 # FIXME CoP name: str is_supported: bool is_standard: bool samples: List[RendererTestDiagnostics] @dataclass
ExpectationRendererDiagnostics
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/cloud_storage_transfer_service.py
{ "start": 35350, "end": 46076 }
class ____(GoogleCloudBaseOperator): """ Sync an S3 bucket with a Google Cloud Storage bucket using the Google Cloud Storage Transfer Service. .. warning:: This operator is NOT idempotent. If you run it many times, many transfer jobs will be created in the Google Cloud. **Example**: .. code-block:: python s3_to_gcs_transfer_op = S3ToGoogleCloudStorageTransferOperator( task_id="s3_to_gcs_transfer_example", s3_bucket="my-s3-bucket", project_id="my-gcp-project", gcs_bucket="my-gcs-bucket", dag=my_dag, ) :param s3_bucket: The S3 bucket where to find the objects. (templated) :param gcs_bucket: The destination Google Cloud Storage bucket where you want to store the files. (templated) :param s3_path: Optional root path where the source objects are. (templated) :param gcs_path: Optional root path for transferred objects. (templated) :param project_id: Optional ID of the Google Cloud Console project that owns the job :param aws_conn_id: The source S3 connection :param gcp_conn_id: The destination connection ID to use when connecting to Google Cloud Storage. :param description: Optional transfer service job description :param schedule: Optional transfer service schedule; If not set, run transfer job once as soon as the operator runs The format is described https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs. With two additional improvements: * dates they can be passed as :class:`datetime.date` * times they can be passed as :class:`datetime.time` :param object_conditions: Optional transfer service object conditions; see https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec :param transfer_options: Optional transfer service transfer options; see https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec :param wait: Wait for transfer to finish. It must be set to True, if 'delete_job_after_completion' is set to True. :param timeout: Time to wait for the operation to end in seconds. 
Defaults to 60 seconds if not specified. :param google_impersonation_chain: Optional Google service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). :param delete_job_after_completion: If True, delete the job after complete. If set to True, 'wait' must be set to True. :param aws_role_arn: Optional AWS role ARN for workload identity federation. This will override the `aws_conn_id` for authentication between GCP and AWS; see https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#AwsS3Data :param deferrable: Run operator in the deferrable mode. 
""" template_fields: Sequence[str] = ( "gcp_conn_id", "s3_bucket", "gcs_bucket", "s3_path", "gcs_path", "description", "object_conditions", "google_impersonation_chain", "aws_role_arn", ) ui_color = "#e09411" def __init__( self, *, s3_bucket: str, gcs_bucket: str, s3_path: str | None = None, gcs_path: str | None = None, project_id: str = PROVIDE_PROJECT_ID, aws_conn_id: str | None = "aws_default", gcp_conn_id: str = "google_cloud_default", description: str | None = None, schedule: dict | None = None, object_conditions: dict | None = None, transfer_options: dict | None = None, wait: bool = True, timeout: float | None = None, google_impersonation_chain: str | Sequence[str] | None = None, delete_job_after_completion: bool = False, aws_role_arn: str | None = None, deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), **kwargs, ) -> None: super().__init__(**kwargs) self.s3_bucket = s3_bucket self.gcs_bucket = gcs_bucket self.s3_path = s3_path self.gcs_path = gcs_path self.project_id = project_id self.aws_conn_id = aws_conn_id self.gcp_conn_id = gcp_conn_id self.description = description self.schedule = schedule self.object_conditions = object_conditions self.transfer_options = transfer_options self.wait = wait self.timeout = timeout self.google_impersonation_chain = google_impersonation_chain self.delete_job_after_completion = delete_job_after_completion self.aws_role_arn = aws_role_arn self.deferrable = deferrable self._validate_inputs() self._transfer_job: dict[str, Any] | None = None def _validate_inputs(self) -> None: if self.delete_job_after_completion and not self.wait: raise AirflowException("If 'delete_job_after_completion' is True, then 'wait' must also be True.") def execute(self, context: Context) -> None: hook = CloudDataTransferServiceHook( gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.google_impersonation_chain, ) body = self._create_body() TransferJobPreprocessor(body=body, aws_conn_id=self.aws_conn_id, 
default_schedule=True).process_body() self._transfer_job = hook.create_transfer_job(body=body) if self.wait: if not self.deferrable: hook.wait_for_transfer_job(self._transfer_job, timeout=self.timeout) if self.delete_job_after_completion: hook.delete_transfer_job(job_name=self._transfer_job[NAME], project_id=self.project_id) else: self.defer( timeout=timedelta(seconds=self.timeout or 60), trigger=CloudStorageTransferServiceCheckJobStatusTrigger( job_name=self._transfer_job[NAME], project_id=self._transfer_job[PROJECT_ID], gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.google_impersonation_chain, ), method_name="execute_complete", ) def execute_complete(self, context: Context, event: dict[str, Any]) -> None: """ Act as a callback for when the trigger fires. This returns immediately. It relies on trigger to throw an exception, otherwise it assumes execution was successful. """ if event["status"] == "error": raise AirflowException(event["message"]) def _create_body(self) -> dict: body = { DESCRIPTION: self.description, STATUS: GcpTransferJobsStatus.ENABLED, TRANSFER_SPEC: { AWS_S3_DATA_SOURCE: { BUCKET_NAME: self.s3_bucket, PATH: normalize_directory_path(self.s3_path), }, GCS_DATA_SINK: { BUCKET_NAME: self.gcs_bucket, PATH: normalize_directory_path(self.gcs_path), }, }, } if self.project_id is not None: body[PROJECT_ID] = self.project_id if self.schedule is not None: body[SCHEDULE] = self.schedule if self.object_conditions is not None: body[TRANSFER_SPEC][OBJECT_CONDITIONS] = self.object_conditions # type: ignore[index] if self.transfer_options is not None: body[TRANSFER_SPEC][TRANSFER_OPTIONS] = self.transfer_options # type: ignore[index] if self.aws_role_arn is not None: body[TRANSFER_SPEC][AWS_S3_DATA_SOURCE][AWS_ROLE_ARN] = self.aws_role_arn # type: ignore[index] return body def get_openlineage_facets_on_complete(self, task_instance) -> OperatorLineage | None: """Provide OpenLineage OperatorLineage for the S3->GCS transfer.""" from 
airflow.providers.common.compat.openlineage.facet import Dataset from airflow.providers.google.cloud.openlineage.facets import ( CloudStorageTransferJobFacet, CloudStorageTransferRunFacet, ) from airflow.providers.openlineage.extractors import OperatorLineage input_ds = Dataset( namespace=f"s3://{self.s3_bucket}", name=normalize_directory_path(self.s3_path) or "", ) output_ds = Dataset( namespace=f"gs://{self.gcs_bucket}", name=normalize_directory_path(self.gcs_path) or "", ) job = self._transfer_job or {} job_facet = CloudStorageTransferJobFacet( jobName=job.get(NAME), projectId=job.get(PROJECT_ID, self.project_id), description=job.get(DESCRIPTION, self.description), status=job.get(STATUS), sourceBucket=job.get(TRANSFER_SPEC, {}) .get(AWS_S3_DATA_SOURCE, {}) .get(BUCKET_NAME, self.s3_bucket), sourcePath=job.get(TRANSFER_SPEC, {}).get(AWS_S3_DATA_SOURCE, {}).get(PATH, self.s3_path), targetBucket=job.get(TRANSFER_SPEC, {}).get(GCS_DATA_SINK, {}).get(BUCKET_NAME, self.gcs_bucket), targetPath=job.get(TRANSFER_SPEC, {}).get(GCS_DATA_SINK, {}).get(PATH, self.gcs_path), objectConditions=job.get(TRANSFER_SPEC, {}).get("objectConditions", self.object_conditions), transferOptions=job.get(TRANSFER_SPEC, {}).get("transferOptions", self.transfer_options), schedule=job.get(SCHEDULE, self.schedule), ) run_facet = CloudStorageTransferRunFacet( jobName=job.get(NAME), wait=self.wait, timeout=self.timeout, deferrable=self.deferrable, deleteJobAfterCompletion=self.delete_job_after_completion, ) return OperatorLineage( inputs=[input_ds], outputs=[output_ds], job_facets={"cloudStorageTransferJob": job_facet}, run_facets={"cloudStorageTransferRun": run_facet}, )
CloudDataTransferServiceS3ToGCSOperator
python
tensorflow__tensorflow
tensorflow/compiler/tests/ftrl_test.py
{ "start": 1133, "end": 14800 }
class ____(xla_test.XLATestCase): def initVariableAndGradient(self, dtype): var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.02, 0.04], dtype=dtype) return var0, var1, grads0, grads1 def equivAdagradTest_FtrlPart(self, steps, dtype): var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype) opt = ftrl.FtrlOptimizer( 3.0, learning_rate_power=-0.5, # using Adagrad learning rate initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0) ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([0.0, 0.0], self.evaluate(var0)) self.assertAllClose([0.0, 0.0], self.evaluate(var1)) # Run Ftrl for a few steps for _ in range(steps): ftrl_update.run() return self.evaluate(var0), self.evaluate(var1) def equivAdagradTest_AdagradPart(self, steps, dtype): var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype) opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1) adagrad_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([0.0, 0.0], self.evaluate(var0)) self.assertAllClose([0.0, 0.0], self.evaluate(var1)) # Run Adagrad for a few steps for _ in range(steps): adagrad_update.run() return self.evaluate(var0), self.evaluate(var1) def equivGradientDescentTest_FtrlPart(self, steps, dtype): var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype) opt = ftrl.FtrlOptimizer( 3.0, learning_rate_power=-0.0, # using Fixed learning rate initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0) ftrl_update = opt.apply_gradients(zip([grads0, grads1], 
[var0, var1])) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([0.0, 0.0], self.evaluate(var0)) self.assertAllClose([0.0, 0.0], self.evaluate(var1)) # Run Ftrl for a few steps for _ in range(steps): ftrl_update.run() return self.evaluate(var0), self.evaluate(var1) def equivGradientDescentTest_GradientDescentPart(self, steps, dtype): var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype) opt = gradient_descent.GradientDescentOptimizer(3.0, name="sgd") sgd_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([0.0, 0.0], self.evaluate(var0)) self.assertAllClose([0.0, 0.0], self.evaluate(var1)) # Run GradientDescent for a few steps for _ in range(steps): sgd_update.run() return self.evaluate(var0), self.evaluate(var1) def testFtrlwithoutRegularization(self): for dtype in self.float_types: with self.session(), self.test_scope(): var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.FtrlOptimizer( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0) ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([0.0, 0.0], self.evaluate(var0)) self.assertAllClose([0.0, 0.0], self.evaluate(var1)) # Run 3 steps FTRL for _ in range(3): ftrl_update.run() # Validate updated params self.assertAllCloseAccordingToType( np.array([-2.60260963, -4.29698515]), self.evaluate(var0), float_rtol=1e-4, half_rtol=1e-2) self.assertAllCloseAccordingToType( np.array([-0.28432083, -0.56694895]), 
self.evaluate(var1), float_rtol=1e-5, half_rtol=1e-2) def testFtrlwithoutRegularization2(self): for dtype in self.float_types: with self.session(), self.test_scope(): var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.FtrlOptimizer( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0) ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([4.0, 3.0], self.evaluate(var1)) # Run 3 steps FTRL for _ in range(3): ftrl_update.run() # Validate updated params self.assertAllCloseAccordingToType( np.array([-2.55607247, -3.98729396]), self.evaluate(var0), 1e-5, 1e-5, float_rtol=1e-4) self.assertAllCloseAccordingToType( np.array([-0.28232238, -0.56096673]), self.evaluate(var1), 1e-5, 1e-5) def testFtrlWithL1(self): for dtype in self.float_types: with self.session(), self.test_scope(): var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.FtrlOptimizer( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=0.0) ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([4.0, 3.0], self.evaluate(var1)) # Run 10 steps FTRL for _ in range(10): ftrl_update.run() # Validate updated params 
self.assertAllCloseAccordingToType( np.array([-7.66718769, -10.91273689]), self.evaluate(var0), rtol=1e-4, bfloat16_rtol=1e-1, bfloat16_atol=1e-1) self.assertAllCloseAccordingToType( np.array([-0.93460727, -1.86147261]), self.evaluate(var1), rtol=1e-4) def testFtrlWithL1_L2(self): for dtype in self.float_types: with self.session(), self.test_scope(): var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.FtrlOptimizer( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0) ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([4.0, 3.0], self.evaluate(var1)) # Run 10 steps FTRL for _ in range(10): ftrl_update.run() # Validate updated params self.assertAllCloseAccordingToType( np.array([-0.24059935, -0.46829352]), self.evaluate(var0), rtol=1e-5) self.assertAllCloseAccordingToType( np.array([-0.02406147, -0.04830509]), self.evaluate(var1), rtol=1e-5) def testFtrlWithL1_L2_L2Shrinkage(self): """Test the new FTRL op with support for l2 shrinkage. The addition of this parameter which places a constant pressure on weights towards the origin causes the gradient descent trajectory to differ. The weights will tend to have smaller magnitudes with this parameter set. 
""" for dtype in self.float_types: with self.session(), self.test_scope(): var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.FtrlOptimizer( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0, l2_shrinkage_regularization_strength=0.1) ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0)) self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1)) # Run 10 steps FTRL for _ in range(10): ftrl_update.run() # Validate updated params self.assertAllCloseAccordingToType( np.array([-0.22578996, -0.44345799]), self.evaluate(var0), rtol=1e-4) self.assertAllCloseAccordingToType( np.array([-0.14378493, -0.13229476]), self.evaluate(var1), rtol=1e-4) def testFtrlWithL2ShrinkageDoesNotChangeLrSchedule(self): """Verifies that l2 shrinkage in FTRL does not change lr schedule.""" for dtype in self.float_types: with self.session(), self.test_scope(): var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.1, 0.2], dtype=dtype) opt0 = ftrl.FtrlOptimizer( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0, l2_shrinkage_regularization_strength=0.1) opt1 = ftrl.FtrlOptimizer( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0) update0 = opt0.apply_gradients([(grads0, var0)]) update1 = opt1.apply_gradients([(grads1, var1)]) 
self.evaluate(variables.global_variables_initializer()) self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0)) self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var1)) # Run 10 steps FTRL for _ in range(10): update0.run() update1.run() # var0 is experiencing L2 shrinkage so it should be smaller than var1 # in magnitude. self.assertTrue((var0.eval()**2 < self.evaluate(var1)**2).all()) accum0 = list(opt0._slots["accum"].values())[0].eval() accum1 = list(opt1._slots["accum"].values())[0].eval() # L2 shrinkage should not change how we update grad accumulator. self.assertAllCloseAccordingToType(accum0, accum1) # When variables are initialized with Zero, FTRL-Proximal has two properties: # 1. Without L1&L2 but with fixed learning rate, FTRL-Proximal is identical # with GradientDescent. # 2. Without L1&L2 but with adaptive learning rate, FTRL-Proximal is identical # with Adagrad. # So, basing on these two properties, we test if our implementation of # FTRL-Proximal performs same updates as Adagrad or GradientDescent. def testEquivAdagradwithoutRegularization(self): steps = 5 for dtype in self.float_types: with self.session(), self.test_scope(): val0, val1 = self.equivAdagradTest_FtrlPart(steps, dtype) with self.session(), self.test_scope(): val2, val3 = self.equivAdagradTest_AdagradPart(steps, dtype) self.assertAllCloseAccordingToType(val0, val2, rtol=1e-4, half_rtol=1e-2) self.assertAllCloseAccordingToType(val1, val3, rtol=1e-4, half_rtol=1e-2) def testEquivGradientDescentwithoutRegularization(self): steps = 5 for dtype in self.float_types: with self.session(), self.test_scope(): val0, val1 = self.equivGradientDescentTest_FtrlPart(steps, dtype) with self.session(), self.test_scope(): val2, val3 = self.equivGradientDescentTest_GradientDescentPart( steps, dtype) self.assertAllCloseAccordingToType(val0, val2, rtol=1e-5) self.assertAllCloseAccordingToType(val1, val3, rtol=1e-5) if __name__ == "__main__": test.main()
FtrlOptimizerTest
python
ansible__ansible
lib/ansible/module_utils/_internal/_traceback.py
{ "start": 299, "end": 3654 }
class ____(enum.Enum): """The events for which tracebacks can be enabled.""" ERROR = enum.auto() WARNING = enum.auto() DEPRECATED = enum.auto() DEPRECATED_VALUE = enum.auto() # implies DEPRECATED def traceback_for() -> list[str]: """Return a list of traceback event names (not enums) which are enabled.""" return [value.name.lower() for value in TracebackEvent if is_traceback_enabled(value)] def is_traceback_enabled(event: TracebackEvent) -> bool: """Return True if tracebacks are enabled for the specified event, otherwise return False.""" return _is_traceback_enabled(event) def maybe_capture_traceback(msg: str, event: TracebackEvent) -> str | None: """ Optionally capture a traceback for the current call stack, formatted as a string, if the specified traceback event is enabled. Frames marked with the `_skip_stackwalk` local are omitted. """ _skip_stackwalk = True if not is_traceback_enabled(event): return None tb_lines = [] if frame_info := _stack.caller_frame(): # DTFIX-FUTURE: rewrite target-side tracebacks to point at controller-side paths? 
tb_lines.append('Traceback (most recent call last):\n') tb_lines.extend(traceback.format_stack(frame_info.frame)) tb_lines.append(f'Message: {msg}\n') else: tb_lines.append('(frame not found)\n') # pragma: nocover return ''.join(tb_lines) def maybe_extract_traceback(exception: BaseException, event: TracebackEvent) -> str | None: """Optionally extract a formatted traceback from the given exception, if the specified traceback event is enabled.""" if not is_traceback_enabled(event): return None # deprecated: description='use the single-arg version of format_traceback' python_version='3.9' tb_lines = traceback.format_exception(type(exception), exception, exception.__traceback__) return ''.join(tb_lines) _module_tracebacks_enabled_events: frozenset[TracebackEvent] | None = None """Cached enabled TracebackEvent values extracted from `_ansible_tracebacks_for` module arg.""" def _is_module_traceback_enabled(event: TracebackEvent) -> bool: """Module utility function to lazily load traceback config and determine if traceback collection is enabled for the specified event.""" global _module_tracebacks_enabled_events if _module_tracebacks_enabled_events is None: try: # Suboptimal error handling, but since import order can matter, and this is a critical error path, better to fail silently # than to mask the triggering error by issuing a new error/warning here. from ..basic import _PARSED_MODULE_ARGS _module_tracebacks_enabled_events = frozenset( TracebackEvent[value.upper()] for value in _PARSED_MODULE_ARGS.get('_ansible_tracebacks_for', []) ) # type: ignore[union-attr] except BaseException: return True # if things failed early enough that we can't figure this out, assume we want a traceback for troubleshooting return event in _module_tracebacks_enabled_events _is_traceback_enabled = _is_module_traceback_enabled """Callable to determine if tracebacks are enabled. Overridden on the controller by display. Use `is_traceback_enabled` instead of calling this directly."""
TracebackEvent
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/engine/reflection.py
{ "start": 73700, "end": 74630 }
class ____: """provides blank default values for reflection methods.""" @classmethod def columns(cls) -> List[ReflectedColumn]: return [] @classmethod def pk_constraint(cls) -> ReflectedPrimaryKeyConstraint: return { "name": None, "constrained_columns": [], } @classmethod def foreign_keys(cls) -> List[ReflectedForeignKeyConstraint]: return [] @classmethod def indexes(cls) -> List[ReflectedIndex]: return [] @classmethod def unique_constraints(cls) -> List[ReflectedUniqueConstraint]: return [] @classmethod def check_constraints(cls) -> List[ReflectedCheckConstraint]: return [] @classmethod def table_options(cls) -> Dict[str, Any]: return {} @classmethod def table_comment(cls) -> ReflectedTableComment: return {"text": None} @dataclass
ReflectionDefaults
python
mwaskom__seaborn
tests/_core/test_properties.py
{ "start": 12410, "end": 13284 }
class ____(ObjectPropertyBase): prop = Marker values = ["o", (5, 2, 0), MarkerStyle("^")] standardized_values = [MarkerStyle(x) for x in values] def assert_equal(self, a, b): a_path, b_path = a.get_path(), b.get_path() assert_array_equal(a_path.vertices, b_path.vertices) assert_array_equal(a_path.codes, b_path.codes) assert a_path.simplify_threshold == b_path.simplify_threshold assert a_path.should_simplify == b_path.should_simplify assert a.get_joinstyle() == b.get_joinstyle() assert a.get_transform().to_values() == b.get_transform().to_values() assert a.get_fillstyle() == b.get_fillstyle() def unpack(self, x): return ( x.get_path(), x.get_joinstyle(), x.get_transform().to_values(), x.get_fillstyle(), )
TestMarker
python
Netflix__metaflow
metaflow/_vendor/packaging/markers.py
{ "start": 5720, "end": 8159 }
class ____: def __init__(self, marker: str) -> None: # Note: We create a Marker object without calling this constructor in # packaging.requirements.Requirement. If any additional logic is # added here, make sure to mirror/adapt Requirement. try: self._markers = _normalize_extra_values(parse_marker(marker)) # The attribute `_markers` can be described in terms of a recursive type: # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]] # # For example, the following expression: # python_version > "3.6" or (python_version == "3.6" and os_name == "unix") # # is parsed into: # [ # (<Variable('python_version')>, <Op('>')>, <Value('3.6')>), # 'and', # [ # (<Variable('python_version')>, <Op('==')>, <Value('3.6')>), # 'or', # (<Variable('os_name')>, <Op('==')>, <Value('unix')>) # ] # ] except ParserSyntaxError as e: raise InvalidMarker(str(e)) from e def __str__(self) -> str: return _format_marker(self._markers) def __repr__(self) -> str: return f"<Marker('{self}')>" def __hash__(self) -> int: return hash((self.__class__.__name__, str(self))) def __eq__(self, other: Any) -> bool: if not isinstance(other, Marker): return NotImplemented return str(self) == str(other) def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool: """Evaluate a marker. Return the boolean from evaluating the given marker against the environment. environment is an optional argument to override all or part of the determined environment. The environment is determined from the current Python process. """ current_environment = default_environment() current_environment["extra"] = "" if environment is not None: current_environment.update(environment) # The API used to allow setting extra to None. We need to handle this # case for backwards compatibility. if current_environment["extra"] is None: current_environment["extra"] = "" return _evaluate_markers(self._markers, current_environment)
Marker
python
doocs__leetcode
lcci/10.01.Sorted Merge/Solution.py
{ "start": 0, "end": 326 }
class ____: def merge(self, A: List[int], m: int, B: List[int], n: int) -> None: i, j = m - 1, n - 1 for k in reversed(range(m + n)): if j < 0 or i >= 0 and A[i] > B[j]: A[k] = A[i] i -= 1 else: A[k] = B[j] j -= 1
Solution
python
PyCQA__pylint
tests/functional/s/slots_checks.py
{ "start": 614, "end": 671 }
class ____: __slots__ = deque(["a", "b", "c"])
FifthGood
python
getsentry__sentry
src/sentry/taskworker/retry.py
{ "start": 1616, "end": 3440 }
class ____: """Used with tasks to define the retry policy for a task""" def __init__( self, *, times: int = 1, on: tuple[type[BaseException], ...] | None = None, ignore: tuple[type[BaseException], ...] | None = None, times_exceeded: LastAction = LastAction.Discard, delay: int | None = None, ): self._times = times self._allowed_exception_types: tuple[type[BaseException], ...] = on or () self._denied_exception_types: tuple[type[BaseException], ...] = ignore or () self._times_exceeded = times_exceeded self._delay = delay def max_attempts_reached(self, state: RetryState) -> bool: # We subtract one, as attempts starts at 0, but `times` # starts at 1. return state.attempts >= (self._times - 1) def should_retry(self, state: RetryState, exc: Exception) -> bool: # If there are no retries remaining we should not retry if self.max_attempts_reached(state): return False # Explicit RetryTaskError with attempts left. if isinstance(exc, RetryTaskError): return True # No retries for types on the ignore list if isinstance(exc, self._denied_exception_types): return False # In the retry allow list or processing deadline is exceeded # When processing deadline is exceeded, the subprocess raises a TimeoutError if isinstance(exc, (TimeoutError, self._allowed_exception_types)): return True return False def initial_state(self) -> RetryState: return RetryState( attempts=0, max_attempts=self._times, on_attempts_exceeded=self._times_exceeded.to_proto(), delay_on_retry=self._delay, )
Retry
python
tensorflow__tensorflow
tensorflow/python/ops/variable_v1.py
{ "start": 1890, "end": 14239 }
class ____(variables.Variable): """See the [Variables Guide](https://tensorflow.org/guide/variables). A variable maintains state in the graph across calls to `run()`. You add a variable to the graph by constructing an instance of the class `Variable`. The `Variable()` constructor requires an initial value for the variable, which can be a `Tensor` of any type and shape. The initial value defines the type and shape of the variable. After construction, the type and shape of the variable are fixed. The value can be changed using one of the assign methods. If you want to change the shape of a variable later you have to use an `assign` Op with `validate_shape=False`. Just like any `Tensor`, variables created with `Variable()` can be used as inputs for other Ops in the graph. Additionally, all the operators overloaded for the `Tensor` class are carried over to variables, so you can also add nodes to the graph by just doing arithmetic on variables. ```python import tensorflow as tf # Create a variable. w = tf.Variable(<initial-value>, name=<optional-name>) # Use the variable in the graph like any Tensor. y = tf.matmul(w, ...another variable or tensor...) # The overloaded operators are available too. z = tf.sigmoid(w + y) # Assign a new value to the variable with `assign()` or a related method. w.assign(w + 1.0) w.assign_add(1.0) ``` When you launch the graph, variables have to be explicitly initialized before you can run Ops that use their value. You can initialize a variable by running its *initializer op*, restoring the variable from a save file, or simply running an `assign` Op that assigns a value to the variable. In fact, the variable *initializer op* is just an `assign` Op that assigns the variable's initial value to the variable itself. ```python # Launch the graph in a session. with tf.compat.v1.Session() as sess: # Run the variable initializer. sess.run(w.initializer) # ...you now can run ops that use the value of 'w'... 
``` The most common initialization pattern is to use the convenience function `global_variables_initializer()` to add an Op to the graph that initializes all the variables. You then run that Op after launching the graph. ```python # Add an Op to initialize global variables. init_op = tf.compat.v1.global_variables_initializer() # Launch the graph in a session. with tf.compat.v1.Session() as sess: # Run the Op that initializes global variables. sess.run(init_op) # ...you can now run any Op that uses variable values... ``` If you need to create a variable with an initial value dependent on another variable, use the other variable's `initialized_value()`. This ensures that variables are initialized in the right order. All variables are automatically collected in the graph where they are created. By default, the constructor adds the new variable to the graph collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function `global_variables()` returns the contents of that collection. When building a machine learning model it is often convenient to distinguish between variables holding the trainable model parameters and other variables such as a `global step` variable used to count training steps. To make this easier, the variable constructor supports a `trainable=<bool>` parameter. If `True`, the new variable is also added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. The convenience function `trainable_variables()` returns the contents of this collection. The various `Optimizer` classes use this collection as the default list of variables to optimize. WARNING: tf.Variable objects by default have a non-intuitive memory model. A Variable is represented internally as a mutable Tensor which can non-deterministically alias other Tensors in a graph. The set of operations which consume a Variable and can lead to aliasing is undetermined and can change across TensorFlow versions. 
Avoid writing code which relies on the value of a Variable either changing or not changing as other operations happen. For example, using Variable objects or simple functions thereof as predicates in a `tf.cond` is dangerous and error-prone: ``` v = tf.Variable(True) tf.cond(v, lambda: v.assign(False), my_false_fn) # Note: this is broken. ``` Here, adding `use_resource=True` when constructing the variable will fix any nondeterminism issues: ``` v = tf.Variable(True, use_resource=True) tf.cond(v, lambda: v.assign(False), my_false_fn) ``` To use the replacement for variables which does not have these issues: * Add `use_resource=True` when constructing `tf.Variable`; * Call `tf.compat.v1.get_variable_scope().set_use_resource(True)` inside a `tf.compat.v1.variable_scope` before the `tf.compat.v1.get_variable()` call. """ def __init__( self, # pylint: disable=super-init-not-called initial_value=None, trainable=None, collections=None, validate_shape=True, caching_device=None, name=None, variable_def=None, dtype=None, expected_shape=None, import_scope=None, constraint=None, use_resource=None, synchronization=variables.VariableSynchronization.AUTO, aggregation=variables.VariableAggregation.NONE, shape=None): """Creates a new variable with value `initial_value`. The new variable is added to the graph collections listed in `collections`, which defaults to `[GraphKeys.GLOBAL_VARIABLES]`. If `trainable` is `True` the variable is also added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. This constructor creates both a `variable` Op and an `assign` Op to set the variable to its initial value. Args: initial_value: A `Tensor`, or Python object convertible to a `Tensor`, which is the initial value for the Variable. The initial value must have a shape specified unless `validate_shape` is set to False. Can also be a callable with no argument that returns the initial value when called. In that case, `dtype` must be specified. 
(Note that initializer functions from init_ops.py must first be bound to a shape before being used here.) trainable: If `True`, also adds the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as the default list of variables to use by the `Optimizer` classes. Defaults to `True`, unless `synchronization` is set to `ON_READ`, in which case it defaults to `False`. collections: List of graph collections keys. The new variable is added to these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. validate_shape: If `False`, allows the variable to be initialized with a value of unknown shape. If `True`, the default, the shape of `initial_value` must be known. caching_device: Optional device string describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. name: Optional name for the variable. Defaults to `'Variable'` and gets uniquified automatically. variable_def: `VariableDef` protocol buffer. If not `None`, recreates the Variable object with its contents, referencing the variable's nodes in the graph, which must already exist. The graph is not changed. `variable_def` and the other arguments are mutually exclusive. dtype: If set, initial_value will be converted to the given type. If `None`, either the datatype will be kept (if `initial_value` is a Tensor), or `convert_to_tensor` will decide. expected_shape: A TensorShape. If set, initial_value is expected to have this shape. import_scope: Optional `string`. Name scope to add to the `Variable.` Only used when initializing from protocol buffer. constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). 
The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. use_resource: whether to use resource variables. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. shape: (optional) The shape of this variable. If None, the shape of `initial_value` will be used. When setting this argument to `tf.TensorShape(None)` (representing an unspecified shape), the variable can be assigned with values of different shapes. Raises: ValueError: If both `variable_def` and initial_value are specified. ValueError: If the initial value is not specified, or does not have a shape and `validate_shape` is `True`. RuntimeError: If eager execution is enabled. """ SaveSliceInfo = variables.Variable.SaveSliceInfo def initialized_value(self): # variable_v1.py is imported at the top-level internally at TF1 import time, # so the import time for this file should be reduced as much as possible. # Thus import cond only when it is used. 
from tensorflow.python.ops import cond # pylint: disable=g-import-not-at-top with ops.init_scope(): return cond.cond( is_variable_initialized(self), self.read_value, lambda: self.initial_value) @staticmethod def from_proto(variable_def, import_scope=None): from tensorflow.python.ops import ref_variable # pylint: disable=g-import-not-at-top return ref_variable.RefVariable( variable_def=variable_def, import_scope=import_scope) @classmethod def _variable_call( cls, initial_value=None, trainable=None, validate_shape=True, caching_device=None, name=None, variable_def=None, dtype=None, import_scope=None, constraint=None, synchronization=variables.VariableSynchronization.AUTO, aggregation=variables.VariableAggregation.NONE, shape=None, experimental_enable_variable_lifting=None, expected_shape=None, collections=None, use_resource=None, **kwargs, ): """VariableV1 class getter. Useful to force the signature.""" if cls is not VariableV1: return None previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs) for _, getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access previous_getter = variables._make_getter(getter, previous_getter) # pylint: disable=protected-access # Reset `aggregation` that is explicitly set as `None` to the enum NONE. if aggregation is None: aggregation = variables.VariableAggregation.NONE return previous_getter( initial_value=initial_value, trainable=trainable, validate_shape=validate_shape, caching_device=caching_device, name=name, variable_def=variable_def, dtype=dtype, import_scope=import_scope, constraint=constraint, synchronization=synchronization, aggregation=aggregation, shape=shape, experimental_enable_variable_lifting=experimental_enable_variable_lifting, expected_shape=expected_shape, collections=collections, use_resource=use_resource, ) variable_scope.set_variable_v1(VariableV1)
VariableV1
python
python-openxml__python-docx
tests/test_shape.py
{ "start": 536, "end": 2283 }
class ____:
    """Unit-test suite for `docx.shape.InlineShapes`."""

    def it_knows_how_many_inline_shapes_it_contains(self, body: CT_Body, document_: Mock):
        shapes = InlineShapes(body, document_)

        assert len(shapes) == 2

    def it_can_iterate_over_its_InlineShape_instances(self, body: CT_Body, document_: Mock):
        shapes = InlineShapes(body, document_)

        for shape in shapes:
            assert isinstance(shape, InlineShape)
        assert len(list(shapes)) == 2

    def it_provides_indexed_access_to_inline_shapes(self, body: CT_Body, document_: Mock):
        shapes = InlineShapes(body, document_)

        # Both negative and non-negative indices inside the valid range work.
        for idx in (-2, -1, 0, 1):
            assert isinstance(shapes[idx], InlineShape)

    def it_raises_on_indexed_access_out_of_range(self, body: CT_Body, document_: Mock):
        shapes = InlineShapes(body, document_)

        with pytest.raises(IndexError, match=r"inline shape index \[-3\] out of range"):
            shapes[-3]
        with pytest.raises(IndexError, match=r"inline shape index \[2\] out of range"):
            shapes[2]

    def it_knows_the_part_it_belongs_to(self, body: CT_Body, document_: Mock):
        shapes = InlineShapes(body, document_)

        assert shapes.part is document_.part

    # -- fixtures --------------------------------------------------------------------------------

    @pytest.fixture
    def body(self) -> CT_Body:
        # Two `wp:inline` elements, so the suite expects exactly two shapes.
        body_cxml = "w:body/w:p/(w:r/w:drawing/wp:inline, w:r/w:drawing/wp:inline)"
        return cast(CT_Body, element(body_cxml))

    @pytest.fixture
    def document_(self, request: FixtureRequest):
        return instance_mock(request, Document)
DescribeInlineShapes
python
kamyu104__LeetCode-Solutions
Python/maximum-xor-of-two-numbers-in-an-array.py
{ "start": 48, "end": 1552 }
class ____(object):
    def findMaximumXOR(self, nums):
        """Returns the maximum value of nums[i] XOR nums[j] over all pairs.

        :type nums: List[int]
        :rtype: int

        Bitwise-trie approach: insert each number's bits (most-significant
        first) into a binary trie; for every number, walk the trie greedily
        preferring the opposite bit at each level, which maximizes the XOR
        against some previously inserted number.
        Time: O(n * logr) with r = max(nums); Space: O(n).
        """
        class Trie(object):
            def __init__(self, bit_length):
                self.__nodes = []
                self.__new_node()  # node 0 is the root
                self.__bit_length = bit_length

            def __new_node(self):
                # A node is a pair of child indices [zero_child, one_child];
                # -1 means "no child yet".
                self.__nodes.append([-1]*2)
                return len(self.__nodes)-1

            def insert(self, num):
                curr = 0
                # `range` (not the Python 2-only `xrange`, which is a
                # NameError on Python 3); semantics are unchanged.
                for i in reversed(range(self.__bit_length)):
                    x = num >> i
                    if self.__nodes[curr][x & 1] == -1:
                        self.__nodes[curr][x & 1] = self.__new_node()
                    curr = self.__nodes[curr][x & 1]

            def query(self, num):
                result = curr = 0
                for i in reversed(range(self.__bit_length)):
                    result <<= 1
                    x = num >> i
                    # Prefer the branch with the opposite bit: it contributes
                    # a 1 to the XOR at this position.
                    if self.__nodes[curr][1 ^ (x & 1)] != -1:
                        curr = self.__nodes[curr][1 ^ (x & 1)]
                        result |= 1
                    else:
                        curr = self.__nodes[curr][x & 1]
                return result

        if not nums:  # defensive: no pair exists, so the best XOR is 0
            return 0
        trie = Trie(max(nums).bit_length())
        result = 0
        for num in nums:
            trie.insert(num)
            # Querying right after inserting `num` also covers the
            # single-element case (num ^ num == 0).
            result = max(result, trie.query(num))
        return result

# Time:  O(nlogr), r = max(nums)
# Space: O(n)
Solution
python
pypa__warehouse
warehouse/manage/forms.py
{ "start": 6475, "end": 7310 }
class ____(wtforms.Form):
    """Form that resolves one of the user's WebAuthn keys from its label.

    On successful validation the matching credential is stored on the form
    as ``self.webauthn`` for the caller to act on.
    """

    __params__ = ["confirm_device_name"]

    label = wtforms.StringField(
        validators=[
            wtforms.validators.InputRequired(message="Specify a device name"),
            wtforms.validators.Length(
                max=64, message="Label must be 64 characters or less"
            ),
        ]
    )

    def __init__(self, *args, user_service, user_id, **kwargs):
        super().__init__(*args, **kwargs)
        self.user_id = user_id
        self.user_service = user_service

    def validate_label(self, field):
        # Look up the credential this user registered under the submitted
        # label; reject the form when no such key exists.
        matching_key = self.user_service.get_webauthn_by_label(
            self.user_id, field.data
        )
        if matching_key is None:
            raise wtforms.validators.ValidationError("No WebAuthn key with given label")
        self.webauthn = matching_key
DeleteWebAuthnForm
python
allegroai__clearml
clearml/backend_api/services/v2_9/projects.py
{ "start": 72614, "end": 74824 }
class ____(Response):
    """
    Response of projects.get_task_tags endpoint.

    :param tags: The list of unique tag values
    :type tags: Sequence[str]
    :param system_tags: The list of unique system tag values. Returned only if
        'include_system' is set to 'true' in the request
    :type system_tags: Sequence[str]
    """

    # Endpoint identity: service name, action and API version this response
    # corresponds to.
    _service = "projects"
    _action = "get_task_tags"
    _version = "2.9"
    # JSON schema describing the wire format of this response payload.
    _schema = {
        "definitions": {},
        "properties": {
            "system_tags": {
                "description": "The list of unique system tag values. Returned only if 'include_system' is set to 'true' in the request",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
            "tags": {
                "description": "The list of unique tag values",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
        },
        "type": "object",
    }

    def __init__(
        self, tags: Optional[List[str]] = None, system_tags: Optional[List[str]] = None, **kwargs: Any
    ) -> None:
        # Assignments go through the validating property setters below.
        super(GetTaskTagsResponse, self).__init__(**kwargs)
        self.tags = tags
        self.system_tags = system_tags

    @schema_property("tags")
    def tags(self) -> Optional[List[str]]:
        return self._property_tags

    @tags.setter
    def tags(self, value: Optional[List[str]]) -> None:
        # None clears the stored value; otherwise require a list/tuple whose
        # elements are all strings (is_array=True checks element-wise).
        if value is None:
            self._property_tags = None
            return
        self.assert_isinstance(value, "tags", (list, tuple))
        self.assert_isinstance(value, "tags", six.string_types, is_array=True)
        self._property_tags = value

    @schema_property("system_tags")
    def system_tags(self) -> Optional[List[str]]:
        return self._property_system_tags

    @system_tags.setter
    def system_tags(self, value: Optional[List[str]]) -> None:
        # Same validation rule as `tags`: None, or a list/tuple of strings.
        if value is None:
            self._property_system_tags = None
            return
        self.assert_isinstance(value, "system_tags", (list, tuple))
        self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
        self._property_system_tags = value
GetTaskTagsResponse
python
django__django
tests/custom_lookups/tests.py
{ "start": 24999, "end": 26647 }
class ____(SimpleTestCase):
    """Checks the order in which lookups and transforms are consulted.

    Each scenario filters through ``birthdate__testyear`` while
    ``TrackCallsYearTransform`` is registered on ``DateField``, then inspects
    (and resets) the transform's class-level ``call_order`` list to assert
    whether it was consulted as a lookup, a transform, or both.
    """

    def test_call_order(self):
        with register_lookup(models.DateField, TrackCallsYearTransform):
            # junk lookup - tries lookup, then transform, then fails
            msg = (
                "Unsupported lookup 'junk' for IntegerField or join on the field not "
                "permitted."
            )
            with self.assertRaisesMessage(FieldError, msg):
                Author.objects.filter(birthdate__testyear__junk=2012)
            self.assertEqual(
                TrackCallsYearTransform.call_order, ["lookup", "transform"]
            )
            # Reset the shared class-level list before the next scenario.
            TrackCallsYearTransform.call_order = []
            # junk transform - tries transform only, then fails
            msg = (
                "Unsupported lookup 'junk__more_junk' for IntegerField or join"
                " on the field not permitted."
            )
            with self.assertRaisesMessage(FieldError, msg):
                Author.objects.filter(birthdate__testyear__junk__more_junk=2012)
            self.assertEqual(TrackCallsYearTransform.call_order, ["transform"])
            TrackCallsYearTransform.call_order = []
            # Just getting the year (implied __exact) - lookup only
            Author.objects.filter(birthdate__testyear=2012)
            self.assertEqual(TrackCallsYearTransform.call_order, ["lookup"])
            TrackCallsYearTransform.call_order = []
            # Just getting the year (explicit __exact) - lookup only
            Author.objects.filter(birthdate__testyear__exact=2012)
            self.assertEqual(TrackCallsYearTransform.call_order, ["lookup"])
LookupTransformCallOrderTests