language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
allegroai__clearml
clearml/backend_api/services/v2_23/datasets.py
{ "start": 8171, "end": 12088 }
class ____(NonStrictDataModel): """ :param id: unique ID (in this frame) :type id: str :param uri: Data URI :type uri: str :param content_type: Content type (e.g. 'image/jpeg', 'image/png') :type content_type: str :param width: Width in pixels :type width: int :param height: Height in pixels :type height: int :param timestamp: Timestamp in the source data (for video content. for images, this value should be 0) :type timestamp: int """ _schema = { "properties": { "content_type": { "description": "Content type (e.g. 'image/jpeg', 'image/png')", "type": "string", }, "height": {"description": "Height in pixels", "type": "integer"}, "id": {"description": "unique ID (in this frame)", "type": "string"}, "timestamp": { "default": 0, "description": "Timestamp in the source data (for video content. for images, this value should be 0)", "type": "integer", }, "uri": {"description": "Data URI", "type": "string"}, "width": {"description": "Width in pixels", "type": "integer"}, }, "required": ["id", "uri"], "type": "object", } def __init__( self, id, uri, content_type=None, width=None, height=None, timestamp=0, **kwargs ): super(Mask, self).__init__(**kwargs) self.id = id self.uri = uri self.content_type = content_type self.width = width self.height = height self.timestamp = timestamp @schema_property("id") def id(self): return self._property_id @id.setter def id(self, value): if value is None: self._property_id = None return self.assert_isinstance(value, "id", six.string_types) self._property_id = value @schema_property("uri") def uri(self): return self._property_uri @uri.setter def uri(self, value): if value is None: self._property_uri = None return self.assert_isinstance(value, "uri", six.string_types) self._property_uri = value @schema_property("content_type") def content_type(self): return self._property_content_type @content_type.setter def content_type(self, value): if value is None: self._property_content_type = None return self.assert_isinstance(value, "content_type", 
six.string_types) self._property_content_type = value @schema_property("width") def width(self): return self._property_width @width.setter def width(self, value): if value is None: self._property_width = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "width", six.integer_types) self._property_width = value @schema_property("height") def height(self): return self._property_height @height.setter def height(self, value): if value is None: self._property_height = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "height", six.integer_types) self._property_height = value @schema_property("timestamp") def timestamp(self): return self._property_timestamp @timestamp.setter def timestamp(self, value): if value is None: self._property_timestamp = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "timestamp", six.integer_types) self._property_timestamp = value
Mask
python
nedbat__coveragepy
tests/test_version.py
{ "start": 328, "end": 1280 }
class ____(CoverageTest): """Tests of version.py""" run_in_temp_dir = False def test_version_info(self) -> None: # Make sure we didn't screw up the version_info tuple. assert isinstance(coverage.version_info, tuple) assert [type(d) for d in coverage.version_info] == [int, int, int, str, int] assert coverage.version_info[3] in {"alpha", "beta", "candidate", "final"} def test_make_version(self) -> None: assert _make_version(4, 0, 0, "alpha") == "4.0.0a0" assert _make_version(4, 0, 0, "alpha", 1) == "4.0.0a1" assert _make_version(4, 0, 0, "final") == "4.0.0" assert _make_version(4, 1, 0) == "4.1.0" assert _make_version(4, 1, 2, "beta", 3) == "4.1.2b3" assert _make_version(4, 1, 2) == "4.1.2" assert _make_version(5, 10, 2, "candidate", 7) == "5.10.2rc7" assert _make_version(5, 10, 2, "candidate", 7, 3) == "5.10.2rc7.dev3"
VersionTest
python
falconry__falcon
tests/test_httperror.py
{ "start": 2516, "end": 2986 }
class ____: def __init__(self): self.called = False def on_get(self, req, resp): self.called = True raise falcon.HTTPError( 792, # NOTE(kgriffs): Test that an int is acceptable even for 7xx codes title='Internet \xe7rashed!', description='\xc7atastrophic weather event', href='http://example.com/api/\xe7limate', href_text='Drill b\xe1by drill!', )
UnicodeFaultyResource
python
sympy__sympy
sympy/functions/special/hyper.py
{ "start": 31139, "end": 31740 }
class ____(HyperRep): """ Represent hyper([1/2, 1], [3/2], z) == atanh(sqrt(z))/sqrt(z). """ @classmethod def _expr_small(cls, x): return atanh(sqrt(x))/sqrt(x) def _expr_small_minus(cls, x): return atan(sqrt(x))/sqrt(x) def _expr_big(cls, x, n): if n.is_even: return (acoth(sqrt(x)) + I*pi/2)/sqrt(x) else: return (acoth(sqrt(x)) - I*pi/2)/sqrt(x) def _expr_big_minus(cls, x, n): if n.is_even: return atan(sqrt(x))/sqrt(x) else: return (atan(sqrt(x)) - pi)/sqrt(x)
HyperRep_atanh
python
getsentry__sentry
src/sentry/incidents/models/alert_rule.py
{ "start": 21082, "end": 22434 }
class ____(Model): """ Provides an audit log of activity for the alert rule """ __relocation_scope__ = RelocationScope.Organization alert_rule = FlexibleForeignKey("sentry.AlertRule") previous_alert_rule = FlexibleForeignKey( "sentry.AlertRule", null=True, related_name="previous_alert_rule" ) user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete="SET_NULL") type = models.IntegerField() date_added = models.DateTimeField(default=timezone.now) class Meta: app_label = "sentry" db_table = "sentry_alertruleactivity" post_delete.connect(AlertRuleManager.clear_subscription_cache, sender=QuerySubscription) post_save.connect(AlertRuleManager.clear_subscription_cache, sender=QuerySubscription) post_save.connect(AlertRuleManager.clear_alert_rule_subscription_caches, sender=AlertRule) post_delete.connect(AlertRuleManager.clear_alert_rule_subscription_caches, sender=AlertRule) post_delete.connect(AlertRuleTriggerManager.clear_alert_rule_trigger_cache, sender=AlertRule) post_save.connect(AlertRuleTriggerManager.clear_alert_rule_trigger_cache, sender=AlertRule) post_save.connect(AlertRuleTriggerManager.clear_trigger_cache, sender=AlertRuleTrigger) post_delete.connect(AlertRuleTriggerManager.clear_trigger_cache, sender=AlertRuleTrigger)
AlertRuleActivity
python
sphinx-doc__sphinx
sphinx/domains/cpp/_ast.py
{ "start": 62412, "end": 64226 }
class ____(ASTTrailingTypeSpec): def __init__(self, names: list[str], canonNames: list[str]) -> None: assert len(names) != 0 assert len(names) == len(canonNames), (names, canonNames) self.names = names # the canonical name list is for ID lookup self.canonNames = canonNames def __eq__(self, other: object) -> bool: if not isinstance(other, ASTTrailingTypeSpecFundamental): return NotImplemented return self.names == other.names and self.canonNames == other.canonNames def __hash__(self) -> int: return hash((self.names, self.canonNames)) def _stringify(self, transform: StringifyTransform) -> str: return ' '.join(self.names) def get_id(self, version: int) -> str: if version == 1: res = [] for a in self.canonNames: if a in _id_fundamental_v1: res.append(_id_fundamental_v1[a]) else: res.append(a) return '-'.join(res) txt = ' '.join(self.canonNames) if txt not in _id_fundamental_v2: raise Exception( 'Semi-internal error: Fundamental type "%s" can not be mapped ' 'to an ID. Is it a true fundamental type? If not so, the ' 'parser should have rejected it.' % txt ) return _id_fundamental_v2[txt] def describe_signature( self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol ) -> None: first = True for n in self.names: if not first: signode += addnodes.desc_sig_space() else: first = False signode += addnodes.desc_sig_keyword_type(n, n)
ASTTrailingTypeSpecFundamental
python
pytorch__pytorch
test/distributed/test_store.py
{ "start": 35599, "end": 37316 }
class ____(TestCase): def tearDown(self): import signal super().tearDown() signal.signal(signal.SIGUSR1, signal.SIG_IGN) def test_interrupt_doesnt_break_wait(self): import signal rank_res = [None, None] def run(rank, my_store): nonlocal rank_res try: if rank == 0: time.sleep(4) my_store.set("foo", "bar") else: my_store.wait(["foo"], datetime.timedelta(seconds=10)) rank_res[rank] = True except Error as e: # noqa: F821 rank_res[rank] = e time.sleep(1) rank0_store = dist.TCPStore( host_name=DEFAULT_HOSTNAME, port=0, world_size=2, is_master=True, wait_for_workers=False, ) rank1_store = dist.TCPStore( host_name=DEFAULT_HOSTNAME, port=rank0_store.port, world_size=2, is_master=False, wait_for_workers=False, ) threads = [] for i in range(2): t = threading.Thread( target=run, args=( i, [rank0_store, rank1_store][i], ), ) t.start() threads.append(t) def handler(a, b): pass signal.signal(signal.SIGUSR1, handler) time.sleep(1) signal.pthread_kill(threads[1].ident, signal.SIGUSR1) for t in threads: t.join() self.assertTrue(rank_res[0], "rank0") self.assertTrue(rank_res[1], "rank1")
TimeoutTest
python
run-llama__llama_index
llama-index-core/llama_index/core/evaluation/retrieval/metrics.py
{ "start": 438, "end": 3010 }
class ____(BaseRetrievalMetric): """ Hit rate metric: Compute hit rate with two calculation options. - The default method checks for a single match between any of the retrieved docs and expected docs. - The more granular method checks for all potential matches between retrieved docs and expected docs. Attributes: metric_name (str): The name of the metric. use_granular_hit_rate (bool): Determines whether to use the granular method for calculation. """ metric_name: ClassVar[str] = "hit_rate" use_granular_hit_rate: bool = False def compute( self, query: Optional[str] = None, expected_ids: Optional[List[str]] = None, retrieved_ids: Optional[List[str]] = None, expected_texts: Optional[List[str]] = None, retrieved_texts: Optional[List[str]] = None, **kwargs: Any, ) -> RetrievalMetricResult: """ Compute metric based on the provided inputs. Parameters ---------- query (Optional[str]): The query string (not used in the current implementation). expected_ids (Optional[List[str]]): Expected document IDs. retrieved_ids (Optional[List[str]]): Retrieved document IDs. expected_texts (Optional[List[str]]): Expected texts (not used in the current implementation). retrieved_texts (Optional[List[str]]): Retrieved texts (not used in the current implementation). Raises ------ ValueError: If the necessary IDs are not provided. Returns ------- RetrievalMetricResult: The result with the computed hit rate score. 
""" # Checking for the required arguments if ( retrieved_ids is None or expected_ids is None or not retrieved_ids or not expected_ids ): raise ValueError("Retrieved ids and expected ids must be provided") if self.use_granular_hit_rate: # Granular HitRate calculation: Calculate all hits and divide by the number of expected docs expected_set = set(expected_ids) hits = sum(1 for doc_id in retrieved_ids if doc_id in expected_set) score = hits / len(expected_ids) if expected_ids else 0.0 else: # Default HitRate calculation: Check if there is a single hit is_hit = any(id in expected_ids for id in retrieved_ids) score = 1.0 if is_hit else 0.0 return RetrievalMetricResult(score=score)
HitRate
python
pandas-dev__pandas
pandas/tests/indexes/datetimes/test_partial_slicing.py
{ "start": 282, "end": 16489 }
class ____: def test_string_index_series_name_converted(self): # GH#1644 df = DataFrame( np.random.default_rng(2).standard_normal((10, 4)), index=date_range("1/1/2000", periods=10), ) result = df.loc["1/3/2000"] assert result.name == df.index[2] result = df.T["1/3/2000"] assert result.name == df.index[2] def test_stringified_slice_with_tz(self): # GH#2658 start = "2013-01-07" idx = date_range(start=start, freq="1D", periods=10, tz="US/Eastern") df = DataFrame(np.arange(10), index=idx) df["2013-01-14 23:44:34.437768-05:00":] # no exception here def test_return_type_doesnt_depend_on_monotonicity(self): # GH#24892 we get Series back regardless of whether our DTI is monotonic dti = date_range(start="2015-5-13 23:59:00", freq="min", periods=3) ser = Series(range(3), index=dti) # non-monotonic index ser2 = Series(range(3), index=[dti[1], dti[0], dti[2]]) # key with resolution strictly lower than "min" key = "2015-5-14 00" # monotonic increasing index result = ser.loc[key] expected = ser.iloc[1:] tm.assert_series_equal(result, expected) # monotonic decreasing index result = ser.iloc[::-1].loc[key] expected = ser.iloc[::-1][:-1] tm.assert_series_equal(result, expected) # non-monotonic index result2 = ser2.loc[key] expected2 = ser2.iloc[::2] tm.assert_series_equal(result2, expected2) def test_return_type_doesnt_depend_on_monotonicity_higher_reso(self): # GH#24892 we get Series back regardless of whether our DTI is monotonic dti = date_range(start="2015-5-13 23:59:00", freq="min", periods=3) ser = Series(range(3), index=dti) # non-monotonic index ser2 = Series(range(3), index=[dti[1], dti[0], dti[2]]) # key with resolution strictly *higher) than "min" key = "2015-5-14 00:00:00" # monotonic increasing index result = ser.loc[key] assert result == 1 # monotonic decreasing index result = ser.iloc[::-1].loc[key] assert result == 1 # non-monotonic index result2 = ser2.loc[key] assert result2 == 0 def test_monotone_DTI_indexing_bug(self): # GH 19362 # Testing accessing the first 
element in a monotonic descending # partial string indexing. df = DataFrame(list(range(5))) date_list = [ "2018-01-02", "2017-02-10", "2016-03-10", "2015-03-15", "2014-03-16", ] date_index = DatetimeIndex(date_list) df["date"] = date_index expected = DataFrame({0: list(range(5)), "date": date_index}) tm.assert_frame_equal(df, expected) # We get a slice because df.index's resolution is hourly and we # are slicing with a daily-resolution string. If both were daily, # we would get a single item back dti = date_range("20170101 01:00:00", periods=3) df = DataFrame({"A": [1, 2, 3]}, index=dti[::-1]) expected = DataFrame({"A": 1}, index=dti[-1:][::-1]) result = df.loc["2017-01-03"] tm.assert_frame_equal(result, expected) result2 = df.iloc[::-1].loc["2017-01-03"] expected2 = expected.iloc[::-1] tm.assert_frame_equal(result2, expected2) def test_slice_year(self): dti = date_range(freq="B", start=datetime(2005, 1, 1), periods=500) s = Series(np.arange(len(dti)), index=dti) result = s["2005"] expected = s[s.index.year == 2005] tm.assert_series_equal(result, expected) df = DataFrame(np.random.default_rng(2).random((len(dti), 5)), index=dti) result = df.loc["2005"] expected = df[df.index.year == 2005] tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "partial_dtime", [ "2019", "2019Q4", "Dec 2019", "2019-12-31", "2019-12-31 23", "2019-12-31 23:59", ], ) def test_slice_end_of_period_resolution(self, partial_dtime): # GH#31064 dti = date_range("2019-12-31 23:59:55.999999999", periods=10, freq="s") ser = Series(range(10), index=dti) result = ser[partial_dtime] expected = ser.iloc[:5] tm.assert_series_equal(result, expected) def test_slice_quarter(self): dti = date_range(freq="D", start=datetime(2000, 6, 1), periods=500) s = Series(np.arange(len(dti)), index=dti) assert len(s["2001Q1"]) == 90 df = DataFrame(np.random.default_rng(2).random((len(dti), 5)), index=dti) assert len(df.loc["1Q01"]) == 90 def test_slice_month(self): dti = date_range(freq="D", 
start=datetime(2005, 1, 1), periods=500) s = Series(np.arange(len(dti)), index=dti) assert len(s["2005-11"]) == 30 df = DataFrame(np.random.default_rng(2).random((len(dti), 5)), index=dti) assert len(df.loc["2005-11"]) == 30 tm.assert_series_equal(s["2005-11"], s["11-2005"]) def test_partial_slice(self): rng = date_range(freq="D", start=datetime(2005, 1, 1), periods=500) s = Series(np.arange(len(rng)), index=rng) result = s["2005-05":"2006-02"] expected = s["20050501":"20060228"] tm.assert_series_equal(result, expected) result = s["2005-05":] expected = s["20050501":] tm.assert_series_equal(result, expected) result = s[:"2006-02"] expected = s[:"20060228"] tm.assert_series_equal(result, expected) result = s["2005-1-1"] assert result == s.iloc[0] with pytest.raises(KeyError, match=r"^'2004-12-31'$"): s["2004-12-31"] def test_partial_slice_daily(self): rng = date_range(freq="h", start=datetime(2005, 1, 31), periods=500) s = Series(np.arange(len(rng)), index=rng) result = s["2005-1-31"] tm.assert_series_equal(result, s.iloc[:24]) with pytest.raises(KeyError, match=r"^'2004-12-31 00'$"): s["2004-12-31 00"] def test_partial_slice_hourly(self): rng = date_range(freq="min", start=datetime(2005, 1, 1, 20, 0, 0), periods=500) s = Series(np.arange(len(rng)), index=rng) result = s["2005-1-1"] tm.assert_series_equal(result, s.iloc[: 60 * 4]) result = s["2005-1-1 20"] tm.assert_series_equal(result, s.iloc[:60]) assert s["2005-1-1 20:00"] == s.iloc[0] with pytest.raises(KeyError, match=r"^'2004-12-31 00:15'$"): s["2004-12-31 00:15"] def test_partial_slice_minutely(self): rng = date_range(freq="s", start=datetime(2005, 1, 1, 23, 59, 0), periods=500) s = Series(np.arange(len(rng)), index=rng) result = s["2005-1-1 23:59"] tm.assert_series_equal(result, s.iloc[:60]) result = s["2005-1-1"] tm.assert_series_equal(result, s.iloc[:60]) assert s[Timestamp("2005-1-1 23:59:00")] == s.iloc[0] with pytest.raises(KeyError, match=r"^'2004-12-31 00:00:00'$"): s["2004-12-31 00:00:00"] def 
test_partial_slice_second_precision(self): rng = date_range( start=datetime(2005, 1, 1, 0, 0, 59, microsecond=999990), periods=20, freq="us", ) s = Series(np.arange(20), rng) tm.assert_series_equal(s["2005-1-1 00:00"], s.iloc[:10]) tm.assert_series_equal(s["2005-1-1 00:00:59"], s.iloc[:10]) tm.assert_series_equal(s["2005-1-1 00:01"], s.iloc[10:]) tm.assert_series_equal(s["2005-1-1 00:01:00"], s.iloc[10:]) assert s[Timestamp("2005-1-1 00:00:59.999990")] == s.iloc[0] with pytest.raises(KeyError, match="2005-1-1 00:00:00"): s["2005-1-1 00:00:00"] def test_partial_slicing_dataframe(self): # GH14856 # Test various combinations of string slicing resolution vs. # index resolution # - If string resolution is less precise than index resolution, # string is considered a slice # - If string resolution is equal to or more precise than index # resolution, string is considered an exact match formats = [ "%Y", "%Y-%m", "%Y-%m-%d", "%Y-%m-%d %H", "%Y-%m-%d %H:%M", "%Y-%m-%d %H:%M:%S", ] resolutions = ["year", "month", "day", "hour", "minute", "second"] for rnum, resolution in enumerate(resolutions[2:], 2): # we check only 'day', 'hour', 'minute' and 'second' unit = Timedelta("1 " + resolution) middate = datetime(2012, 1, 1, 0, 0, 0) index = DatetimeIndex([middate - unit, middate, middate + unit]) values = [1, 2, 3] df = DataFrame({"a": values}, index, dtype=np.int64) assert df.index.resolution == resolution # Timestamp with the same resolution as index # Should be exact match for Series (return scalar) # and raise KeyError for Frame for timestamp, expected in zip(index, values): ts_string = timestamp.strftime(formats[rnum]) # make ts_string as precise as index result = df["a"][ts_string] assert isinstance(result, np.int64) assert result == expected msg = rf"^'{ts_string}'$" with pytest.raises(KeyError, match=msg): df[ts_string] # Timestamp with resolution less precise than index for fmt in formats[:rnum]: for element, theslice in [[0, slice(None, 1)], [1, slice(1, None)]]: 
ts_string = index[element].strftime(fmt) # Series should return slice result = df["a"][ts_string] expected = df["a"][theslice] tm.assert_series_equal(result, expected) # pre-2.0 df[ts_string] was overloaded to interpret this # as slicing along index with pytest.raises(KeyError, match=ts_string): df[ts_string] # Timestamp with resolution more precise than index # Compatible with existing key # Should return scalar for Series # and raise KeyError for Frame for fmt in formats[rnum + 1 :]: ts_string = index[1].strftime(fmt) result = df["a"][ts_string] assert isinstance(result, np.int64) assert result == 2 msg = rf"^'{ts_string}'$" with pytest.raises(KeyError, match=msg): df[ts_string] # Not compatible with existing key # Should raise KeyError for fmt, res in list(zip(formats, resolutions))[rnum + 1 :]: ts = index[1] + Timedelta("1 " + res) ts_string = ts.strftime(fmt) msg = rf"^'{ts_string}'$" with pytest.raises(KeyError, match=msg): df["a"][ts_string] with pytest.raises(KeyError, match=msg): df[ts_string] def test_partial_slicing_with_multiindex(self): # GH 4758 # partial string indexing with a multi-index buggy df = DataFrame( { "ACCOUNT": ["ACCT1", "ACCT1", "ACCT1", "ACCT2"], "TICKER": ["ABC", "MNP", "XYZ", "XYZ"], "val": [1, 2, 3, 4], }, index=date_range("2013-06-19 09:30:00", periods=4, freq="5min"), ) df_multi = df.set_index(["ACCOUNT", "TICKER"], append=True) expected = DataFrame( [[1]], index=Index(["ABC"], name="TICKER"), columns=["val"] ) result = df_multi.loc[("2013-06-19 09:30:00", "ACCT1")] tm.assert_frame_equal(result, expected) expected = df_multi.loc[ (Timestamp("2013-06-19 09:30:00", tz=None), "ACCT1", "ABC") ] result = df_multi.loc[("2013-06-19 09:30:00", "ACCT1", "ABC")] tm.assert_series_equal(result, expected) # partial string indexing on first level, scalar indexing on the other two result = df_multi.loc[("2013-06-19", "ACCT1", "ABC")] expected = df_multi.iloc[:1].droplevel([1, 2]) tm.assert_frame_equal(result, expected) def 
test_partial_slicing_with_multiindex_series(self): # GH 4294 # partial slice on a series mi ser = Series( range(250), index=MultiIndex.from_product( [date_range("2000-1-1", periods=50), range(5)] ), ) s2 = ser[:-1].copy() expected = s2["2000-1-4"] result = s2[Timestamp("2000-1-4")] tm.assert_series_equal(result, expected) result = ser[Timestamp("2000-1-4")] expected = ser["2000-1-4"] tm.assert_series_equal(result, expected) df2 = DataFrame(ser) expected = df2.xs("2000-1-4") result = df2.loc[Timestamp("2000-1-4")] tm.assert_frame_equal(result, expected) def test_partial_slice_requires_monotonicity(self): # Disallowed since 2.0 (GH 37819) ser = Series(np.arange(10), date_range("2014-01-01", periods=10)) nonmonotonic = ser.iloc[[3, 5, 4]] timestamp = Timestamp("2014-01-10") with pytest.raises( KeyError, match="Value based partial slicing on non-monotonic" ): nonmonotonic["2014-01-10":] with pytest.raises(KeyError, match=r"Timestamp\('2014-01-10 00:00:00'\)"): nonmonotonic[timestamp:] with pytest.raises( KeyError, match="Value based partial slicing on non-monotonic" ): nonmonotonic.loc["2014-01-10":] with pytest.raises(KeyError, match=r"Timestamp\('2014-01-10 00:00:00'\)"): nonmonotonic.loc[timestamp:] def test_loc_datetime_length_one(self): # GH16071 df = DataFrame( columns=["1"], index=date_range("2016-10-01T00:00:00", "2016-10-01T23:59:59"), ) result = df.loc[datetime(2016, 10, 1) :] tm.assert_frame_equal(result, df) result = df.loc["2016-10-01T00:00:00":] tm.assert_frame_equal(result, df) @pytest.mark.parametrize( "start", [ "2018-12-02 21:50:00+00:00", Timestamp("2018-12-02 21:50:00+00:00"), Timestamp("2018-12-02 21:50:00+00:00").to_pydatetime(), ], ) @pytest.mark.parametrize( "end", [ "2018-12-02 21:52:00+00:00", Timestamp("2018-12-02 21:52:00+00:00"), Timestamp("2018-12-02 21:52:00+00:00").to_pydatetime(), ], ) def test_getitem_with_datestring_with_UTC_offset(self, start, end): # GH 24076 idx = date_range( start="2018-12-02 14:50:00-07:00", end="2018-12-02 
14:50:00-07:00", freq="1min", ) df = DataFrame(1, index=idx, columns=["A"]) result = df[start:end] expected = df.iloc[0:3, :] tm.assert_frame_equal(result, expected) # GH 16785 start = str(start) end = str(end) with pytest.raises(ValueError, match="Both dates must"): df[start : end[:-4] + "1:00"] df = df.tz_localize(None) with pytest.raises(ValueError, match="The index must be timezone"): df[start:end] def test_slice_reduce_to_series(self): # GH 27516 df = DataFrame( {"A": range(24)}, index=date_range("2000", periods=24, freq="ME") ) expected = Series( range(12), index=date_range("2000", periods=12, freq="ME"), name="A" ) result = df.loc["2000", "A"] tm.assert_series_equal(result, expected)
TestSlicing
python
great-expectations__great_expectations
great_expectations/validator/v1_validator.py
{ "start": 1279, "end": 5563 }
class ____: """Validator. Responsible for running expectations on a batch definition. """ def __init__( self, batch_definition: BatchDefinition, result_format: ResultFormatUnion = DEFAULT_RESULT_FORMAT, batch_parameters: Optional[BatchParameters] = None, ) -> None: self._batch_definition = batch_definition self._batch_parameters = batch_parameters self.result_format = result_format self._get_validator = project_manager.get_validator def validate_expectation( self, expectation: Expectation, expectation_parameters: Optional[SuiteParameterDict] = None, ) -> ExpectationValidationResult: """Run a single expectation against the batch definition""" results = self._validate_expectation_configs( expectation_configs=[expectation.configuration], expectation_parameters=expectation_parameters, ) assert len(results) == 1 return results[0] def validate_expectation_suite( self, expectation_suite: ExpectationSuite, expectation_parameters: Optional[SuiteParameterDict] = None, ) -> ExpectationSuiteValidationResult: """Run an expectation suite against the batch definition""" results = self._validate_expectation_configs( expectation_configs=expectation_suite.expectation_configurations, expectation_parameters=expectation_parameters, ) statistics = calc_validation_statistics(results) return ExpectationSuiteValidationResult( results=results, success=statistics.success, suite_name=expectation_suite.name, statistics={ "evaluated_expectations": statistics.evaluated_expectations, "successful_expectations": statistics.successful_expectations, "unsuccessful_expectations": statistics.unsuccessful_expectations, "success_percent": statistics.success_percent, }, meta={ # run_id, validation_time, and fkeys are added to this dict # in ValidationDefinition.run "great_expectations_version": ge_version, "batch_spec": convert_to_json_serializable( self._wrapped_validator.active_batch_spec ), "batch_markers": self._wrapped_validator.active_batch_markers, "active_batch_definition": 
convert_to_json_serializable( self._wrapped_validator.active_batch_definition ), }, batch_id=self.active_batch_id, ) @property def active_batch_id(self) -> Optional[str]: return self._wrapped_validator.active_batch_id @property def _include_rendered_content(self) -> bool: return project_manager.is_using_cloud() @cached_property def _wrapped_validator(self) -> OldValidator: batch_request = self._batch_definition.build_batch_request( batch_parameters=self._batch_parameters ) return self._get_validator(batch_request=batch_request) def _validate_expectation_configs( self, expectation_configs: list[ExpectationConfiguration], expectation_parameters: Optional[SuiteParameterDict] = None, ) -> list[ExpectationValidationResult]: """Run a list of expectation configurations against the batch definition""" processed_expectation_configs = self._wrapped_validator.process_expectations_for_validation( expectation_configs, expectation_parameters ) runtime_configuration: dict if isinstance(self.result_format, ResultFormat): runtime_configuration = {"result_format": copy(self.result_format.value)} else: runtime_configuration = {"result_format": copy(self.result_format)} results = self._wrapped_validator.graph_validate( configurations=processed_expectation_configs, runtime_configuration=runtime_configuration, ) if self._include_rendered_content: for result in results: result.render() return results
Validator
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/alloy_db.py
{ "start": 43421, "end": 48758 }
class AlloyDBUpdateUserOperator(AlloyDBWriteBaseOperator):
    """
    Update an Alloy DB user.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:AlloyDBUpdateUserOperator`

    :param user_id: Required. The ID of the user to update.
    :param cluster_id: Required. ID of the cluster.
    :param user_configuration: Required. User to update. For more details please see API documentation:
        https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.User
    :param update_mask: Optional. Field mask is used to specify the fields to be overwritten in the
        User resource by the update.
    :param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
        so that if you must retry your request, the server ignores the request if it has already been
        completed. The server guarantees that for at least 60 minutes since the first request.
        For example, consider a situation where you make an initial request and the request times out.
        If you make the request again with the same request ID, the server can check if the original operation
        with the same request ID was received, and if so, ignores the second request.
        This prevents clients from accidentally creating duplicate commitments.
        The request ID must be a valid UUID with the exception that zero UUID is not supported
        (00000000-0000-0000-0000-000000000000).
    :param validate_request: Optional. If set, performs request validation, but does not actually
        execute the request.
    :param allow_missing: Optional. If set to true, update succeeds even if instance is not found.
        In that case, a new user is created and update_mask is ignored.
    :param project_id: Required. The ID of the Google Cloud project where the service is used.
    :param location: Required. The ID of the Google Cloud region where the service is used.
    :param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
    :param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
        be retried.
    :param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
        Note that if `retry` is specified, the timeout applies to each individual attempt.
    :param metadata: Optional. Additional metadata that is provided to the method.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    # Expose our own fields for templating in addition to everything the
    # base write operator already templates.
    template_fields: Sequence[str] = tuple(
        set(AlloyDBWriteBaseOperator.template_fields)
        | {"cluster_id", "user_id", "user_configuration", "update_mask", "allow_missing"}
    )
    operator_extra_links = (AlloyDBUsersLink(),)

    def __init__(
        self,
        cluster_id: str,
        user_id: str,
        user_configuration: alloydb_v1.User | dict,
        update_mask: FieldMask | dict | None = None,
        allow_missing: bool = False,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.cluster_id = cluster_id
        self.user_id = user_id
        self.user_configuration = user_configuration
        self.update_mask = update_mask
        self.allow_missing = allow_missing

    @property
    def extra_links_params(self) -> dict[str, Any]:
        # Parameters used by AlloyDBUsersLink to build the console URL.
        return {
            "location_id": self.location,
            "cluster_id": self.cluster_id,
            "project_id": self.project_id,
        }

    def execute(self, context: Context) -> dict | None:
        AlloyDBUsersLink.persist(context=context)
        if self.validate_request:
            self.log.info("Validating an Update AlloyDB user request.")
        else:
            self.log.info("Updating an AlloyDB user.")
        try:
            user = self.hook.update_user(
                cluster_id=self.cluster_id,
                user_id=self.user_id,
                project_id=self.project_id,
                location=self.location,
                user=self.user_configuration,
                update_mask=self.update_mask,
                allow_missing=self.allow_missing,
                request_id=self.request_id,
                validate_only=self.validate_request,
                retry=self.retry,
                timeout=self.timeout,
                metadata=self.metadata,
            )
        except Exception as ex:
            raise AirflowException(ex) from ex
        # A validation-only request produces no user payload to return.
        if self.validate_request:
            return None
        self.log.info("AlloyDB user %s was successfully updated.", self.user_id)
        return alloydb_v1.User.to_dict(user)
AlloyDBUpdateUserOperator
python
dagster-io__dagster
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
{ "start": 55890, "end": 57302 }
class FakerSource(GeneratedAirbyteSource):
    @public
    def __init__(
        self,
        name: str,
        count: int,
        seed: Optional[int] = None,
        records_per_sync: Optional[int] = None,
        records_per_slice: Optional[int] = None,
    ):
        """Airbyte Source for Faker.

        Documentation can be found at https://docs.airbyte.com/integrations/sources/faker

        Args:
            name (str): The name of the destination.
            count (int): How many users should be generated in total. This setting does not apply to the purchases or products stream.
            seed (Optional[int]): Manually control the faker random seed to return the same values on subsequent runs (leave -1 for random)
            records_per_sync (Optional[int]): How many fake records will be returned for each sync, for each stream? By default, it will take 2 syncs to create the requested 1000 records.
            records_per_slice (Optional[int]): How many fake records will be in each page (stream slice), before a state message is emitted?
        """
        # ``count`` is mandatory; the remaining knobs are optional ints.
        self.count = check.int_param(count, "count")
        optional_ints = {
            "seed": seed,
            "records_per_sync": records_per_sync,
            "records_per_slice": records_per_slice,
        }
        for attr_name, attr_value in optional_ints.items():
            setattr(self, attr_name, check.opt_int_param(attr_value, attr_name))
        super().__init__("Faker", name)
FakerSource
python
pennersr__django-allauth
allauth/mfa/webauthn/views.py
{ "start": 1342, "end": 2444 }
class ____(FormView): form_class = AddWebAuthnForm template_name = "mfa/webauthn/add_form." + account_settings.TEMPLATE_EXTENSION def get_context_data(self, **kwargs): ret = super().get_context_data() creation_options = auth.begin_registration(self.request.user, False) ret["js_data"] = {"creation_options": creation_options} return ret def get_form_kwargs(self): ret = super().get_form_kwargs() ret["user"] = self.request.user return ret def get_success_url(self): if self.did_generate_recovery_codes: return reverse("mfa_view_recovery_codes") return reverse("mfa_index") def form_valid(self, form): auth, rc_auth = flows.add_authenticator( self.request, name=form.cleaned_data["name"], credential=form.cleaned_data["credential"], ) self.did_generate_recovery_codes = bool(rc_auth) return super().form_valid(form) add_webauthn = AddWebAuthnView.as_view() @method_decorator(login_required, name="dispatch")
AddWebAuthnView
python
bottlepy__bottle
bottle.py
{ "start": 86386, "end": 87469 }
class HeaderDict(MultiDict):
    """A case-insensitive :class:`MultiDict` whose item assignment replaces
    any existing value instead of appending to it."""

    def __init__(self, *a, **ka):
        self.dict = {}
        if a or ka:
            self.update(*a, **ka)

    def __contains__(self, key):
        return _hkey(key) in self.dict

    def __delitem__(self, key):
        del self.dict[_hkey(key)]

    def __getitem__(self, key):
        # Values are stored as lists; expose the most recently added one.
        return self.dict[_hkey(key)][-1]

    def __setitem__(self, key, value):
        # Replace semantics: discard any previously stored values.
        self.dict[_hkey(key)] = [_hval(value)]

    def append(self, key, value):
        # Explicit multi-value add (e.g. repeated Set-Cookie headers).
        self.dict.setdefault(_hkey(key), []).append(_hval(value))

    def replace(self, key, value):
        self.dict[_hkey(key)] = [_hval(value)]

    def getall(self, key):
        return self.dict.get(_hkey(key)) or []

    def get(self, key, default=None, index=-1):
        # Delegate to MultiDict with the header key normalized first.
        return MultiDict.get(self, _hkey(key), default, index)

    def filter(self, names):
        # Remove every listed header that is present; missing names are ignored.
        for name in map(_hkey, names):
            if name in self.dict:
                del self.dict[name]
HeaderDict
python
pytorch__pytorch
torch/distributed/fsdp/_optim_utils.py
{ "start": 3510, "end": 54952 }
class ____(NamedTuple): """ This represents an optimizer state key that may be used commonly across ranks. It is based on the unflattened parameter names rather than parameter IDs to make it independent of each rank's own optimizer construction. """ unflat_param_names: tuple[str, ...] is_fsdp_managed: bool def _unflatten_optim_state( fsdp_param_info: FSDPParamInfo, flat_param_state: dict[str, Any], to_save: bool, shard_state: bool, cpu_offload: bool, ) -> list[dict[str, Any]]: """ Unflattens the optimizer state, consisting of the "state" part and the "param_groups" part. Unflattening the "state" part involves consolidating the state on the target rank and remapping from flattened to unflattened parameter IDs, and the "param_groups" part only involves remapping from flattened to unflattened parameter IDs. Args: fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a mapping from FQN to original parameter index. flat_param_state (Dict[str, Any]): Entry for the flat parameter in the "state" part of the optimizer state dict. to_save (bool): Whether to save the state on this rank. Returns: List[Dict[str, Any]]: A :class:`list` holding the entries in the "state" part of the optimizer state dict corresponding to the unflattened parameters comprising the flat parameter if on the target rank or an empty :class:`list` otherwise. The final optimizer state dict will need to map these entries using the proper unflattened parameter IDs. 
""" if shard_state and not to_save: raise AssertionError("If ``shard_state`` is True, ``to_save`` has to be True.") consolidated_state = _communicate_optim_state( fsdp_param_info, flat_param_state, ) if to_save: unflat_param_state = _unflatten_communicated_optim_state( fsdp_param_info, consolidated_state, shard_state, ) for optim_state in unflat_param_state: # We can't use .items() below cuz we'd run into a concurrent modification error if cpu_offload: for key in list(optim_state.keys()): state = optim_state[key] if not isinstance(state, torch.Tensor): continue optim_state[key] = state.cpu() return unflat_param_state else: return [] def _is_zero_dim_tensor(x: Any) -> bool: return torch.is_tensor(x) and x.dim() == 0 def _communicate_optim_state( fsdp_param_info: FSDPParamInfo, flat_param_state: dict[str, Any], ) -> _ConsolidatedOptimState: """ Communicates the optimizer state for a flat parameter across ranks. All ranks will hold the entire non-sharded optimizer state on GPU. If ``N`` is the number of tensor optimizer states in the optimizer state dict, then the communication complexity is 0 if ``N = 0`` and ``N + 1`` otherwise (where the plus 1 comes from all-gathering the padding per rank). Args: fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a mapping from FQN to original parameter index. flat_param_state (Dict[str, Any]): The entry in the "state" part of the optimizer state dict corresponding to the flat parameter. Returns: ConsolidatedOptimState: Consolidated optimizer state for the target flat parameter. 
""" fsdp_state = fsdp_param_info.state flat_param = fsdp_param_info.handle.flat_param state = _ConsolidatedOptimState() tensor_state, zero_dim_tensor_state, non_tensor_state = ( state.tensor_state, state.zero_dim_tensor_state, state.non_tensor_state, ) for state_name, value in sorted_items(flat_param_state): # Positive-dimension tensor state: communicate across ranks if torch.is_tensor(value) and value.dim() > 0: # If the parameter is not sharded, then neither is the # positive-dimension tensor state, so no need to communicate it -- # we take the target rank's value if ( fsdp_state.world_size == 1 or fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD ): tensor_state[state_name] = value continue if fsdp_state.compute_device is None: raise AssertionError("compute_device has not been initialized") if value.device.type != fsdp_state.compute_device.type: value = value.to(fsdp_state.compute_device) # Assume that positive-dimension tensor optimizer state # has the same shape as the sharded flat parameter buffer_size = flat_param._full_param_padded.size() # type: ignore[attr-defined] tensor_buffer = value.new_zeros(*buffer_size) dist.all_gather_into_tensor( tensor_buffer, value, group=fsdp_state.process_group ) fsdp_state._device_handle.synchronize() unpadded_numel = cast( nn.Parameter, flat_param._unpadded_unsharded_size ).numel() tensor_state[state_name] = tensor_buffer[:unpadded_numel] # Zero-dimension tensor state and non-tensor state: take this rank's # value directly else: if _is_zero_dim_tensor(value): zero_dim_tensor_state[state_name] = value.detach().clone() else: non_tensor_state[state_name] = value return state def _unflatten_communicated_optim_state( fsdp_param_info: FSDPParamInfo, state: _ConsolidatedOptimState, shard_state: bool, ) -> list[dict[str, Any]]: """ Unflattens the communicated optimizer state (given by ``tensor_state``, ``non_tensor_state``, and ``zero_dim_tensor_state``) for a single flat parameter. 
This should only be called on the target rank. Args: fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a mapping from FQN to original parameter index. state (_ConsolidatedOptimState): Consolidated optimizer state. Returns: List[Dict[str, Any]]: A :class:`list` holding the entries in the "state" part of the optimizer state dict corresponding to the unflattened parameters comprising the flat parameter. The final optimizer state dict will need to map these entries using the proper unflattened parameter IDs. """ fsdp_state = fsdp_param_info.state handle = fsdp_param_info.handle flat_param = handle.flat_param unflat_param_state: list[dict[str, Any]] = [] flat_param_views: dict[str, Iterator] = {} num_unflat_params = flat_param._num_params tensor_state, zero_dim_tensor_state, non_tensor_state = ( state.tensor_state, state.zero_dim_tensor_state, state.non_tensor_state, ) for _ in range(num_unflat_params): unflat_state_param = {} # Add positive-dimension tensor state: unflatten with views for state_name, flat_tensor in sorted_items(tensor_state): views_generated = state_name in flat_param_views if not views_generated: views = handle._get_unflat_views(flat_tensor) flat_param_views[state_name] = views else: views = flat_param_views[state_name] optim_state: Union[torch.Tensor, ShardedTensor, DTensor] = next(views) if shard_state: osd_config = fsdp_state._optim_state_dict_config if getattr(osd_config, "_use_dtensor", False): if fsdp_state._device_mesh is None: raise AssertionError( f"Expected _device_mesh to be not None, got {fsdp_state._device_mesh}" ) optim_state = _ext_chunk_dtensor( optim_state, fsdp_state.rank, fsdp_state._device_mesh, fsdp_state._fsdp_extension, ) else: if fsdp_state.process_group is None: raise AssertionError( f"Expected process_group to be not None, got {fsdp_state.process_group}" ) optim_state = _ext_chunk_tensor( optim_state, fsdp_state.rank, fsdp_state.world_size, fsdp_state._device_handle.device_count(), fsdp_state.process_group, 
fsdp_state._fsdp_extension, ) unflat_state_param[state_name] = optim_state # Add zero-dimension tensor state: take the target rank's value unflat_state_param.update(sorted_items(zero_dim_tensor_state)) # Add non-tensor state: take the target rank's value unflat_state_param.update(sorted_items(non_tensor_state)) unflat_param_state.append(unflat_state_param) return unflat_param_state def _broadcast_processed_state( fsdp_state: _FSDPState, optim_state: dict[str, Any], group: Optional[dist.ProcessGroup], ) -> dict[str, Any]: objects: list[Any] = [None] if dist.get_rank(group) == 0: objects[0] = tree_map_only( torch.Tensor, lambda v: v.cpu() if v.dim() == 0 else _PosDimTensorInfo(v.shape, v.dtype), # type: ignore[union-attr] optim_state, ) dist.broadcast_object_list(objects, src=0, group=group) if dist.get_rank(group) == 0: return optim_state else: return objects[0] def _broadcast_state( fsdp_state: _FSDPState, state: Any, group: Optional[dist.ProcessGroup] ) -> Any: if dist.get_rank(group) == 0: if not isinstance(state, torch.Tensor) or state.dim() == 0: return state tensor = state.to(fsdp_state.compute_device) else: if isinstance(state, torch.Tensor): if state.dim() != 0: raise AssertionError( "For non-zero ranks, a tensor state should have zero dimension, " f"but got the state with shape {state.shape}." ) return state elif not isinstance(state, _PosDimTensorInfo): return state tensor = torch.zeros( state.shape, dtype=state.dtype, device=fsdp_state.compute_device ) dist.broadcast(tensor, src=0, group=group) return tensor def _shard_orig_param_state( fsdp_param_info: FSDPParamInfo, fqn: str, optim_state: dict[str, Any], ) -> dict[str, Any]: """ Shard the optimizer state for the original parameter with the name ``fqn``. This API should only be used when ``use_orig_params`` is True. 
""" if not optim_state: return {} fsdp_state = fsdp_param_info.state flat_param = fsdp_param_info.handle.flat_param param_idx = fsdp_param_info.param_indices[fqn] shard_param_info = flat_param._shard_param_infos[param_idx] # type: ignore[attr-defined] optim_state = _gather_state_dict( optim_state, pg=fsdp_state.process_group, device=fsdp_state.compute_device ) if not shard_param_info.in_shard: return {} # Flatten and shard the state. new_optim_state: dict[str, Any] = {} intra_param_start_idx = shard_param_info.intra_param_start_idx intra_param_end_idx = shard_param_info.intra_param_end_idx for state_name, value in optim_state.items(): if ( torch.is_tensor(value) and value.dim() > 0 and fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD ): value = value.flatten()[ intra_param_start_idx : intra_param_end_idx # type: ignore[operator] + 1 ].clone() new_optim_state[state_name] = value return new_optim_state def _flatten_optim_state_dict( optim_state_dict: dict[str, Any], model: nn.Module, use_orig_params: bool = False, optim: Optional[torch.optim.Optimizer] = None, rank0_only: bool = False, group: Optional[dist.ProcessGroup] = None, ) -> dict[str, Any]: """ Flattens the full optimizer state dict, still keying by unflattened parameter names. If ``use_orig_params`` is True, each rank will have all FSDP-managed parameters but some of these parameters may be empty due to the sharding. For a regular optim.Optimizer, states for those empty parameters will not be initialized. So, when aggregating the FQNs across ranks, no assert will be raised on a rank even if it does not have all the states -- it is valid and FSDP know how to aggregate them. However, FSDP has to ignore handling those parameters that are not managed by FSDP and do not exist on the local rank -- it is managed by other parallelism and FSDP does not know ho to handle/aggregate them. Note that ``_flatten_tensor_optim_state`` does not need ``optim`` to flatten/shard the state. 
However, NamedOptimizer and KeyedOptimizer require all the states even if the corresponding parameters are empty. To this end, ``optim`` will be used to get the initial state of the empty parameters. ``optim`` should only be non-None if the ``optim` is KeyedOptimizer or NamedOptimizer. Returns: Dict[str, Any]: The flattened optimizer state dict. """ SimpleProfiler.reset() unflat_osd = optim_state_dict if "state" not in unflat_osd and not rank0_only: raise ValueError( '`optim_state_dict` must have the keys "state"' "to be a valid optimizer state dict" ) param_to_fqns = _get_param_to_fqns(model) fqn_to_fsdp_param_info = _get_fqn_to_fsdp_param_info(model) fsdp_state = next(iter(fqn_to_fsdp_param_info.values())).state # Broadcast unflat_osd without non-scalar tensor if rank0_only is True. if rank0_only: unflat_osd = _broadcast_processed_state(fsdp_state, unflat_osd, group=group) # Construct the "state" part flat_osd_state: dict[Union[_OptimStateKey, str], Any] = {} unflat_osd_state = unflat_osd["state"] all_state_keys = set(unflat_osd_state.keys()) for param, fqns in param_to_fqns.items(): fqn = fqns[0] if fqn not in unflat_osd_state: continue all_state_keys.difference_update(fqns) if rank0_only: for fqn in fqns: if not unflat_osd_state[fqn]: continue for state_name in unflat_osd_state[fqn]: unflat_osd_state[fqn][state_name] = _broadcast_state( fsdp_state, unflat_osd_state[fqn][state_name], group=group ) fqn = fqns[0] if fqn in fqn_to_fsdp_param_info: fsdp_param_info = fqn_to_fsdp_param_info[fqn] if use_orig_params: with SimpleProfiler.profile(SimpleProfiler.Type.RESHARDING): flat_state = _shard_orig_param_state( fsdp_param_info, fqn, unflat_osd_state[fqn], ) else: flat_state = _flatten_optim_state( fsdp_param_info, unflat_osd_state, fqns, ) key = _OptimStateKey(tuple(fqns), True) # Only include non-empty states since as expected by # `torch.optim.Optimizer` s unless the optimizer is KeyedOptimizer # or NamedOptimizer. 
if flat_state: flat_osd_state[key] = flat_state elif use_orig_params: if len(fqns) != 1: raise AssertionError( f"use_orig_params is True but there are multiple FQNs, {fqns}." ) if optim is not None: # NamedOptimizer or KeyedOptimizer case. state = optim.state.get(param, None) # type: ignore[call-overload] if state is not None: flat_osd_state[key] = copy.deepcopy(state) else: warnings.warn( f"optim_state[{key}] is not on rank{fsdp_state.rank}.", stacklevel=2, ) else: raise RuntimeError( f"The state of {key} is empty. This should happen when " "use_orig_params=True." ) else: # do not flatten non-FSDP parameters' states if len(fqns) != 1: raise AssertionError(f"Expected len(fqns) == 1, got {len(fqns)}") key = _OptimStateKey(tuple(fqns), False) flat_osd_state[key] = copy.copy(unflat_osd_state[fqn]) if rank0_only: for fqn in fqns: if not unflat_osd_state[fqn]: continue for state_name, param_state in list(unflat_osd_state[fqn].items()): if fsdp_state.rank > 0: # Deference the tensor so that PyTorch can collect the memory. del unflat_osd_state[fqn][state_name] else: # Move the tensor in the original osd back to CPU to make the # original osd unaffected. unflat_osd_state[fqn][state_name] = param_state.cpu() # Handle user-defined state, states that are not associated with parameters. 
for key in all_state_keys: user_state = unflat_osd_state[key] if isinstance(user_state, torch.Tensor) and rank0_only and use_orig_params: user_state = _broadcast_state(fsdp_state, user_state, group=group) flat_osd_state[key] = copy.copy(user_state) SimpleProfiler.dump_and_reset("FSDP _flatten_optim_state_dict() profiling: ") # Construct the "param_groups" part -- copy as is since it will be # rekeyed later according to the target rank's optimizer # Only copy param_groups if it exists in unflat_osd if "param_groups" in unflat_osd: flat_osd_param_groups = copy.deepcopy(unflat_osd["param_groups"]) return {"state": flat_osd_state, "param_groups": flat_osd_param_groups} else: return {"state": flat_osd_state} def _flatten_optim_state( fsdp_param_info: FSDPParamInfo, unflat_osd_state: dict[str, dict[str, Any]], unflat_param_names: list[str], ) -> dict[str, Any]: """ Flattens the optimizer state in ``full_optim_state_dict`` for a single flat parameter in ``fsdp_param_info`` corresponding to the unflattened parameter names in ``unflat_param_names``. Args: fsdp_param_info (FSDPParamInfo): The FSDP state, the handle, and a mapping from FQN to original parameter index. unflat_osd_state (Dict[str, Dict[str, Any]]): The "state" part of the optimizer state dict corresponding to the unflattened parameters. unflat_param_names (List[str]): A :class:`list` of unflattened parameter names corresponding to the flat parameter ``flat_param``. Returns: Dict[str, Any]: A :class:`dict` mapping state names to their values for a particular flat parameter. The sharded optimizer state dict's "state" part will map a key to this returned value. 
""" fsdp_state = fsdp_param_info.state handle = fsdp_param_info.handle flat_param = handle.flat_param num_unflat_params = len(unflat_param_names) if num_unflat_params <= 0: raise AssertionError( "Expects at least one unflattened parameter corresponding to the flat parameter" ) unflat_param_shapes = flat_param._shapes num_unflat_param_shapes = len(unflat_param_shapes) if num_unflat_params != num_unflat_param_shapes: raise AssertionError( f"Expects {num_unflat_params} shapes but got {num_unflat_param_shapes}" ) # Check if these unflattened parameters have any optimizer state has_state = [ bool(unflat_param_name in unflat_osd_state) for unflat_param_name in unflat_param_names ] # If none of the unflattened parameters comprising this flat parameter have # any state, then we do not want an entry in the optimizer state dict if not any(has_state): return {} # no need to flatten any state # There may still be some unflattened parameters with state and some # without unflat_param_states = [ _gather_state_dict( unflat_osd_state[unflat_param_name], pg=fsdp_state.process_group, device=fsdp_state.compute_device, ) if unflat_param_name in unflat_osd_state else None for unflat_param_name in unflat_param_names ] # Check that the unflattened parameters have the same state names state_names = None # pyrefly: ignore [bad-assignment] for unflat_param_state in unflat_param_states: if unflat_param_state is None: continue if state_names is None: state_names = set(unflat_param_state.keys()) else: if state_names != set(unflat_param_state.keys()): raise ValueError( "Differing optimizer state names for the unflattened " f"parameters: {unflat_param_names}" ) if state_names is None: raise AssertionError(f"Expected state_names to be not None, got {state_names}") # Flatten the state flat_state: dict[str, Optional[torch.Tensor]] = {} for state_name in state_names: state_values = [ unflat_param_state[state_name] if unflat_param_state is not None else None for unflat_param_state in 
unflat_param_states ] non_none_state_values = [v for v in state_values if v is not None] # If all ranks have None, this is a None value if not non_none_state_values: flat_state[state_name] = None continue are_pos_dim_tensors = are_zero_dim_tensors = are_non_tensors = True for v in non_none_state_values: are_pos_dim_tensors &= torch.is_tensor(v) and v.dim() > 0 are_zero_dim_tensors &= _is_zero_dim_tensor(v) are_non_tensors &= not torch.is_tensor(v) types = {type(v) for v in non_none_state_values} if len(types) != 1 or not ( are_pos_dim_tensors or are_zero_dim_tensors or are_non_tensors ): raise ValueError( f"Differing optimizer state types for state {state_name}, " f"values {non_none_state_values}, and unflattened parameter " f"names {unflat_param_names}" ) if are_pos_dim_tensors: flat_tensor = _flatten_tensor_optim_state( state_name, state_values, # type: ignore[arg-type] unflat_param_names, unflat_param_shapes, handle, ) # Shard the flattened tensor immediately to minimize max memory # usage if ( fsdp_state.world_size != 1 and fsdp_state.sharding_strategy != ShardingStrategy.NO_SHARD ): sharded_flat_tensor, _ = FlatParamHandle._get_shard( flat_tensor, fsdp_state.rank, fsdp_state.world_size, ) else: sharded_flat_tensor = flat_tensor flat_state[state_name] = sharded_flat_tensor elif are_zero_dim_tensors: flat_state[state_name] = _flatten_zero_dim_tensor_optim_state( state_name, state_values, # type: ignore[arg-type] unflat_param_names, ) else: if not are_non_tensors: raise AssertionError( f"Expected are_non_tensors to be True, got {are_non_tensors}" ) flat_state[state_name] = _flatten_non_tensor_optim_state( state_name, state_values, unflat_param_names, ) return flat_state def _flatten_tensor_optim_state( state_name: str, pos_dim_tensors: list[torch.Tensor], unflat_param_names: list[str], unflat_param_shapes: Sequence[torch.Size], handle: FlatParamHandle, ) -> torch.Tensor: """ Flattens the positive-dimension tensor optimizer state given by the values ``tensors`` 
for the state ``state_name`` for a single flat parameter from ``handle`` corresponding to the unflattened parameter names ``unflat_param_names`` and unflatted parameter shapes ``unflat_param_shapes``. This flattens each unflattened parameter's tensor state into one tensor. NOTE: We use zero tensors for any unflattened parameters without state since some value is required to fill those entries. This assumes that the zero tensor is mathematically equivalent to having no state, which is true for Adam's "exp_avg" and "exp_avg_sq" but may not be true for all optimizers. Args: state_name (str): Optimizer state name. pos_dim_tensors (List[torch.Tensor]): Positive-dimension tensor optimizer state values for the unflattened parameters corresponding to the single flat parameter. unflat_param_names (List[str]): A :class:`list` of unflattened parameter names corresponding to the single flat parameter. unflat_param_shapes (List[torch.Size]): Unflattened parameter shapes corresponding to the single flat parameter. handle (FlatParamHandle): The flat parameter's handle. Returns: torch.Tensor: A flat tensor containing the optimizer state corresponding to ``state_name`` constructed by concatenating the unflattened parameter tensor states in ``pos_dim_tensors`` (using zero tensors for any unflattened parameters without the state). 
""" flat_param = handle.flat_param non_none_tensors = [t for t in pos_dim_tensors if t is not None] # Check that all are tensors with the same dtype dtypes = {t.dtype for t in non_none_tensors} if len(dtypes) != 1: raise ValueError( "All unflattened parameters comprising a single flat " "parameter must have positive-dimension tensor state with the " f"same dtype but got dtypes {dtypes} for state {state_name} and " f"unflattened parameter names {unflat_param_names}" ) dtype = next(iter(dtypes)) # Check that each tensor state matches its parameter's shape for tensor, shape in zip(pos_dim_tensors, unflat_param_shapes): if tensor is None and len(shape) == 0: raise ValueError("Flattening a zero-dimension parameter is not supported") elif tensor is not None and tensor.shape != shape: raise ValueError( "Tensor optimizer state does not have same shape as its " f"parameter: {tensor.shape} {shape}" ) # Flatten the tensor states: we do not need to add any right-hand-side # padding since the flat optimizer state tensor is sharded via # `_get_shard()`, which pads the shard as needed (just like for the flat # parameter) cpu_device = torch.device("cpu") tensors_to_flatten = [ torch.flatten(state_value.to(cpu_device)) if state_value is not None else torch.flatten( torch.zeros( size=shape, dtype=dtype, device=cpu_device, ) ) for state_value, shape in zip(pos_dim_tensors, unflat_param_shapes) ] flat_tensor = handle.flatten_tensors(tensors_to_flatten, handle._aligned_numel) flat_param_shape = flat_param._unpadded_unsharded_size # type: ignore[attr-defined] if flat_tensor.shape != flat_param_shape: raise AssertionError( f"tensor optim state: {flat_tensor.shape} flat parameter: {flat_param_shape}" ) return flat_tensor def _flatten_zero_dim_tensor_optim_state( state_name: str, zero_dim_tensors: list[torch.Tensor], unflat_param_names: list[str], ) -> torch.Tensor: """ Flattens the zero-dimension tensor optimizer state given by the values ``zero_dim_tensors`` for the state ``state_name`` 
for a single flat parameter corresponding to the unflattened parameter names ``unflat_param_names`` by enforcing that all tensors are the same and using that common value. NOTE: The requirement that the tensors are the same across all unflattened parameters comprising the flat parameter is needed to maintain the invariant that FSDP performs the same computation as its non-sharded equivalent. This means that none of the unflattened parameters can be missing this state since imposing a value may differ from having no value. For example, for Adam's "step", no value means maximum bias correction, while having some positive value means less bias correction. Args: state_name (str): Optimizer state name. zero_dim_tensors (List[torch.Tensor]): Zero-dimension optimizer state for the unflattened parameters corresponding to the single flat parameter. unflat_param_names (List[str]): A :class:`list` of unflattened parameter names corresponding to the single flat parameter. Returns: torch.Tensor: A zero-dimensional tensor giving the value of the state ``state_name`` for all unflattened parameters corresponding to the names ``unflat_param_names``. 
""" non_none_tensors = [t for t in zero_dim_tensors if t is not None] # Enforce that all have the same value and dtype values_set = {t.item() if t is not None else None for t in zero_dim_tensors} dtypes = {t.dtype if t is not None else None for t in zero_dim_tensors} if ( len(non_none_tensors) != len(zero_dim_tensors) or len(values_set) != 1 or len(dtypes) != 1 ): raise ValueError( "All unflattened parameters comprising a single flat " "parameter must have scalar state with the same value and dtype " f"but got values {values_set} and dtypes {dtypes} for state " f"{state_name} and unflattened parameter names " f"{unflat_param_names}" ) value = next(iter(values_set)) dtype = next(iter(dtypes)) return torch.tensor(value, dtype=dtype, device=torch.device("cpu")) def _flatten_non_tensor_optim_state( state_name: str, non_tensors: list[Any], unflat_param_names: list[str], ) -> Any: """ Flattens the non-tensor optimizer state given by the values ``non_tensors`` for the state ``state_name`` for a single flat parameter corresponding to the unflattened parameter names ``unflat_param_names`` by enforcing that all values are the same and using that common value. See the note in :func:`_flatten_zero_dim_tensor_optim_state`. Args: state_name (str): Optimizer state name. non_tensors (List[Any]): Non-tensor optimizer state for the unflattened parameters corresponding to the single flat parameter. unflat_param_names (List[str]): A :class:`list` of unflattened parameter names corresponding to the single flat parameter. Returns: Any: A non-tensor giving the value of the state ``state_name`` for all unflattened parameters corresponding to the names ``unflat_param_names``. 
""" non_none_non_tensors = [nt for nt in non_tensors if nt is not None] # Enforce that all have the same value (same type already checked) non_tensor_set = set(non_tensors) if len(non_none_non_tensors) != len(non_tensors) or len(non_tensor_set) != 1: raise ValueError( "All unflattened parameters comprising a single flat " "parameter must have scalar state with the same value and dtype " f"but got values {non_tensor_set} for state {state_name} and " f"unflattened parameter names {unflat_param_names}" ) non_tensor = next(iter(non_tensor_set)) return non_tensor def _rekey_sharded_optim_state_dict( sharded_osd: dict[str, Any], model: nn.Module, optim: torch.optim.Optimizer, optim_input: Optional[ Union[ list[dict[str, Any]], Iterable[nn.Parameter], ] ], using_optim_input: bool, is_named_optimizer: bool = False, ) -> dict[str, Any]: """ Rekeys the optimizer state dict from unflattened parameter names to flat parameter IDs according to the calling rank's ``optim``, which may be different across ranks. In particular, the unflattened parameter names are represented as :class:`_OptimStateKey` s. 
""" param_to_fqns = _get_param_to_fqns(model) flat_param_to_fqn = _get_flat_param_to_fqn(model) param_to_param_key: dict[nn.Parameter, Union[int, str]] = cast( dict[nn.Parameter, Union[int, str]], ( _get_param_to_param_id_from_optim_input(model, optim_input) if using_optim_input else _get_param_to_param_key( optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn ) ), ) # All parameter keys in `param_to_param_key` should be in # `param_to_fqns` -- strict inequality follows when not all parameters are # passed to the optimizer if len(param_to_param_key) > len(param_to_fqns): raise AssertionError( f"Expected len(param_to_param_key) <= len(param_to_fqns), got {len(param_to_param_key)} > {len(param_to_fqns)}" ) unflat_param_names_to_flat_param_key: dict[ tuple[str, ...], Union[int, str] ] = {} # for "state" unflat_param_name_to_flat_param_key: dict[ str, Union[int, str] ] = {} # for "param_groups" for param, unflat_param_names in param_to_fqns.items(): if param not in param_to_param_key: # This parameter was not passed to the optimizer continue flat_param_key = param_to_param_key[param] unflat_param_names_to_flat_param_key[tuple(unflat_param_names)] = flat_param_key for unflat_param_name in unflat_param_names: unflat_param_name_to_flat_param_key[unflat_param_name] = flat_param_key sharded_osd_state = sharded_osd["state"] rekeyed_osd_state: dict[Union[str, int], Any] = {} for key, param_state in sharded_osd_state.items(): if isinstance(key, str): rekeyed_osd_state[key] = param_state continue flat_param_key = unflat_param_names_to_flat_param_key.get( key.unflat_param_names, key.unflat_param_names ) # pyrefly: ignore [unsupported-operation] rekeyed_osd_state[flat_param_key] = param_state # Only process param_groups if it exists in sharded_osd if "param_groups" in sharded_osd: rekeyed_osd_param_groups: list[dict[str, Any]] = [] for unflat_param_group in sharded_osd["param_groups"]: flat_param_group = copy.deepcopy(unflat_param_group) flat_param_keys = sorted( { 
unflat_param_name_to_flat_param_key[unflat_param_name] for unflat_param_name in unflat_param_group["params"] } ) flat_param_group["params"] = flat_param_keys rekeyed_osd_param_groups.append(flat_param_group) return {"state": rekeyed_osd_state, "param_groups": rekeyed_osd_param_groups} else: return {"state": rekeyed_osd_state} def _get_param_id_to_param_from_optim_input( model: nn.Module, optim_input: Optional[ Union[ list[dict[str, Any]], Iterable[nn.Parameter], ] ] = None, ) -> dict[int, nn.Parameter]: """ Constructs a mapping from parameter IDs to parameters. This may be used both for models with ``FlatParameter`` s and without. NOTE: This method is only preserved for backward compatibility. The method :meth:`_get_param_key_to_param` is the preferred code path that does not rely on ``optim_input``. NOTE: We critically assume that, whether the optimizer input is a list of parameters or a list of parameter groups, :class:`torch.optim.Optimizer` enumerates the parameter IDs in order. In other words, for a parameter list input, the parameter IDs should be in that list order, and for a parameter groups input, the parameter IDs should be in order within each parameter group and in order across parameter groups. Args: model (nn.Module): Model whose parameters are passed into the optimizer. optim_input (Optional[Union[List[Dict[str, Any]], Iterable[nn.Parameter]]]): Input passed into the optimizer representing either a :class:`list` of parameter groups or an iterable of parameters; if ``None``, then this method assumes the input was ``model.parameters()``. (Default: ``None``) Returns: List[nn.Parameter]: Mapping from parameter IDs to parameters, where the parameter ID is implicitly the index in the :class:`list`. 
""" # Assume the standard case of passing `model.parameters()` to the optimizer # if `optim_input` is not specified if optim_input is None: return dict(enumerate(model.parameters())) try: # pyrefly: ignore [no-matching-overload] # pyrefly: ignore [redundant-cast] params = cast(list[nn.Parameter], list(optim_input)) except TypeError as e: raise TypeError( "Optimizer input should be an iterable of Tensors or dicts, " f"but got {optim_input}" ) from e if len(params) == 0: raise ValueError("Optimizer input should not be empty") # Check if the optimizer input represents tensors or parameter groups all_tensors = True all_dicts = True for param in params: all_tensors &= isinstance(param, torch.Tensor) all_dicts &= isinstance(param, dict) if not all_tensors and not all_dicts: raise TypeError("Optimizer input should be an iterable of Tensors or dicts") if all_tensors: return dict(enumerate(params)) if not all_dicts: raise AssertionError(f"Expected all_dicts to be True, got {all_dicts}") param_id_to_param: list[nn.Parameter] = [] for param_group in params: has_params_key = "params" in param_group # type: ignore[operator] if not has_params_key: raise AssertionError( 'A parameter group should map "params" to a list of the parameters in the group' ) # Implicitly map `flat_param_id` (current length of the list) to # `param` param_id_to_param.extend(param_group["params"]) # type: ignore[index] return dict(enumerate(param_id_to_param)) def _get_flat_param_to_fqn(model: torch.nn.Module) -> dict[FlatParameter, str]: """ Constructs a mapping from ``FlatParameter`` to a cleaned (devoid of prefixes from wrappers) fully qualified name (FQN). Note that this FQN is "non-canonical" because ``FlatParameter`` s do not come from the original module but are registered only after FSDP has been applied. 
This function returns the FSDP-given name for the ``FlatParameter`` (usually module._flat_param) as opposed to the canonical FQNs returned for ``FlatParameter`` s in ``_common_utils._get_param_to_fqns(...)``). Consequently, this function will only return a non-empty mapping if FSDP was applied with ``use_orig_params=False`` as, otherwise, the original parameters are used within the module and there would be no ``FlatParameter`` s in the module. """ def module_fn(module, prefix, tree_level, flat_param_to_fqn): for param_name, param in _named_parameters_with_duplicates( module, recurse=False ): if not isinstance(param, FlatParameter): continue fqn = clean_tensor_name(prefix + param_name) flat_param_to_fqn[param] = fqn def return_fn(flat_param_to_fqn): return flat_param_to_fqn flat_param_to_fqn_ret: dict[FlatParameter, str] = {} return _apply_to_modules( model, module_fn, return_fn, [fqn for fqn, _ in _named_parameters_with_duplicates(model)], flat_param_to_fqn_ret, ) def _get_param_key_to_param( optim: torch.optim.Optimizer, model: Optional[nn.Module] = None, is_named_optimizer: bool = False, param_to_fqns: Optional[dict[nn.Parameter, list[str]]] = None, flat_param_to_fqn: Optional[dict[FlatParameter, str]] = None, ) -> dict[Union[int, str], nn.Parameter]: """ Constructs a mapping from parameter keys to parameters. For the regular optimizers, the keys are parameter IDs. For NamedOptimizer, the keys are FQNs. This API may be used both for models with ``FlatParameter`` s and without. """ clean_fqn_to_curr_fqn: dict[str, str] = {} if is_named_optimizer: if param_to_fqns is None or flat_param_to_fqn is None: raise AssertionError( "The optimizer is a NamedOptimizer, `param_to_fqns` must not be None." 
) if model is None: raise AssertionError(f"Expected model to be not None, got {model}") for key, _ in _named_parameters_with_duplicates(model): clean_fqn_to_curr_fqn[clean_tensor_name(key)] = key param_key_to_param: dict[Union[str, int], nn.Parameter] = {} pid = 0 for param_group in optim.param_groups: if is_named_optimizer: for param in param_group["params"]: if flat_param_to_fqn is None: raise AssertionError( f"Expected flat_param_to_fqn to be not None, got {flat_param_to_fqn}" ) if param in flat_param_to_fqn: # FlatParameter case key = flat_param_to_fqn[param] else: if param_to_fqns is None: raise AssertionError( f"Expected param_to_fqns to be not None, got {param_to_fqns}" ) # use_orig_params case if len(param_to_fqns[param]) != 1: raise AssertionError( f"Expected len(param_to_fqns[param]) == 1, got {len(param_to_fqns[param])}" ) key = param_to_fqns[param][0] try: key = clean_fqn_to_curr_fqn[key] except KeyError as e: raise KeyError( f"Can't find {key} from {list(clean_fqn_to_curr_fqn.keys())}." ) from e param_key_to_param[key] = param else: for param in param_group["params"]: param_key_to_param[pid] = param pid += 1 return param_key_to_param def _get_param_to_param_key( optim: torch.optim.Optimizer, model: Optional[nn.Module] = None, is_named_optimizer: bool = False, param_to_fqns: Optional[dict[nn.Parameter, list[str]]] = None, flat_param_to_fqn: Optional[dict[FlatParameter, str]] = None, ) -> dict[nn.Parameter, Union[int, str]]: """ Constructs the inverse mapping of :func:`_get_param_key_to_param`. This API only supports the case where `optim` is a regular optimizer, not NamedOptimizer. So the parameter keys will be parameter ids. 
""" param_id_to_param = _get_param_key_to_param( optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn ) return {param: param_id for param_id, param in param_id_to_param.items()} def _get_param_to_param_id_from_optim_input( model: nn.Module, optim_input: Optional[ Union[ list[dict[str, Any]], Iterable[nn.Parameter], ] ] = None, ) -> dict[nn.Parameter, int]: """Constructs the inverse mapping of :func:`_get_param_id_to_param_from_optim_input`.""" param_id_to_param = _get_param_id_to_param_from_optim_input(model, optim_input) return {param: param_id for param_id, param in param_id_to_param.items()} def _check_missing_keys_on_rank( r0_optim_state_keys: list[_OptimStateKey], optim_state_key_to_param_key: dict[_OptimStateKey, Union[str, int]], param_key_to_param: dict[Union[str, int], nn.Parameter], group: Optional[dist.ProcessGroup], ) -> None: # Ensure that all ranks have at least the optimizer states needed by # rank 0's optimizer missing_keys: list[_OptimStateKey] = [] for r0_optim_state_key in r0_optim_state_keys: if r0_optim_state_key not in optim_state_key_to_param_key: # A parameter from rank 0's optimizer does not exist for this # rank's optimizer missing_keys.append(r0_optim_state_key) continue param_key = optim_state_key_to_param_key[r0_optim_state_key] if isinstance(param_key, int): if not (param_key >= 0 and param_key < len(param_key_to_param)): raise AssertionError("Check the `param_key_to_param` construction") # We cannot use FSDPState.compute_device as this API is a global view. 
device = _get_pg_default_device(group) num_missing = torch.tensor([len(missing_keys)], dtype=torch.int32, device=device) dist.all_reduce(num_missing, group=group) if num_missing.item() > 0: obj_list = [None for _ in range(dist.get_world_size(group))] dist.all_gather_object(obj_list, missing_keys, group=group) error_msg = ( "FSDP currently requires each rank to have at least the " "optimizer states needed by rank 0's optimizer but some ranks " "are missing some of those states" ) for rank, keys in enumerate(obj_list): keys = cast(list[_OptimStateKey], keys) if len(keys) > 0: error_msg += ( f"\nRank {rank} is missing states for the parameters: " f"{[key.unflat_param_names for key in keys]}" ) raise RuntimeError(error_msg) def _map_param_key_to_optim_keys( optim_state_dict: dict[str, Any], group: Optional[dist.ProcessGroup], param_key_to_param: dict[Union[int, str], nn.Parameter], param_to_fqns: dict[nn.Parameter, list[str]], fqn_to_fsdp_param_info: dict[str, FSDPParamInfo], merge_keys: bool = False, ) -> tuple[list[_OptimStateKey], dict[_OptimStateKey, Union[int, str]]]: """ Construct the local mapping between the ``_OptimStateKey`` and parameter keys and all the ``_OptimStateKey`` across ranks. If ``merge_keys`` is False, rank0 must contain all the ``_OptimStateKey``, an exception will be raised otherwise. Note that ``merge_keys`` should equal to ``use_orig_params``. 
""" rank = dist.get_rank(group) optim_state_key_to_param_key: dict[_OptimStateKey, Union[int, str]] = {} # local all_optim_state_keys: list[_OptimStateKey] = [] for param_key, param in param_key_to_param.items(): # Do not include parameters without state to avoid empty mappings # just like in normal `torch.optim.Optimizer.state_dict()` if param_key not in optim_state_dict["state"]: continue fqns = param_to_fqns[param] is_fsdp_managed = isinstance(param, FlatParameter) if is_fsdp_managed: if fqns[0] not in fqn_to_fsdp_param_info: raise AssertionError( f"Expected {fqns[0]} to be in fqn_to_fsdp_param_info, got keys: {list(fqn_to_fsdp_param_info.keys())}" ) is_fsdp_managed = fqns[0] in fqn_to_fsdp_param_info optim_state_key = _OptimStateKey( unflat_param_names=tuple(fqns), is_fsdp_managed=is_fsdp_managed, ) if rank == 0 or merge_keys: all_optim_state_keys.append(optim_state_key) optim_state_key_to_param_key[optim_state_key] = param_key if merge_keys: all_keys: list[list[_OptimStateKey]] = [ [] for _ in range(dist.get_world_size(group)) ] dist.all_gather_object(all_keys, all_optim_state_keys, group=group) merge_all_optim_state_keys = [*chain.from_iterable(all_keys)] all_optim_state_keys = sorted(set(merge_all_optim_state_keys)) else: key_obj_list: list[Optional[list[_OptimStateKey]]] = ( [all_optim_state_keys] if rank == 0 else [None] ) dist.broadcast_object_list(key_obj_list, src=0, group=group) if key_obj_list[0] is None: raise AssertionError( f"Expected key_obj_list[0] to be not None, got {key_obj_list[0]}" ) all_optim_state_keys = key_obj_list[0] _check_missing_keys_on_rank( all_optim_state_keys, optim_state_key_to_param_key, param_key_to_param, group, ) return all_optim_state_keys, optim_state_key_to_param_key def _unflatten_param_groups( state_dict: dict[str, Any], param_key_to_param: dict[Union[int, str], nn.Parameter], param_to_fqns: dict[nn.Parameter, list[str]], ) -> list[dict[str, Any]]: param_groups: list[dict[str, Any]] = [] for flat_param_group in 
state_dict["param_groups"]: unflat_param_group = copy.deepcopy(flat_param_group) param_group_params = [ param_key_to_param[flat_param_key] for flat_param_key in flat_param_group["params"] ] nested_unflat_param_names = [ param_to_fqns[param] for param in param_group_params ] unflat_param_group["params"] = [ *chain.from_iterable(nested_unflat_param_names) ] # flatten the list of lists param_groups.append(unflat_param_group) return param_groups def _is_named_optimizer(optim_state_dict: dict[str, Any]) -> bool: """ Returns whether the state_dict is from a NamedOptimizer. This function checks that the keys in the state_dict['state'] are strings (which usually are FQNs) versus integers (which usually refer to param_ids from a vanilla torch.optim.Optimizer). """ state = optim_state_dict.get("state") if not state: # If we cannot find a state, assume it is not NamedOptimizer as # NamedOptimizer has eager initialization. return False try: key = next(iter(state.keys())) except Exception as e: raise Exception(optim_state_dict) from e # noqa: TRY002 return isinstance(key, str) @dataclass
_OptimStateKey
python
huggingface__transformers
src/transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py
{ "start": 29882, "end": 34382 }
class ____(nn.Module): """ Vision Rotary Position Embedding for SAM2, following transformers library standards. Supports 2D (axial) rotary embeddings for spatial dimensions. """ def __init__(self, config: Sam3TrackerVideoConfig): super().__init__() dim = config.memory_attention_hidden_size // ( config.memory_attention_downsample_rate * config.memory_attention_num_attention_heads ) # Ensure even dimension for proper axial splitting if dim % 4 != 0: raise ValueError("Dimension must be divisible by 4 for axial RoPE") end_x, end_y = config.memory_attention_rope_feat_sizes freqs = 1.0 / (config.memory_attention_rope_theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim)) # Generate 2D position indices for axial rotary embedding flattened_indices = torch.arange(end_x * end_y, dtype=torch.long) x_positions = flattened_indices % end_x y_positions = torch.div(flattened_indices, end_x, rounding_mode="floor") freqs_x = torch.outer(x_positions, freqs).float() freqs_y = torch.outer(y_positions, freqs).float() inv_freq = torch.cat([freqs_x, freqs_y], dim=-1) inv_freq = inv_freq.repeat_interleave(2, dim=-1) # directly register the cos and sin embeddings as we have a fixed feature shape self.register_buffer("rope_embeddings_cos", inv_freq.cos(), persistent=False) self.register_buffer("rope_embeddings_sin", inv_freq.sin(), persistent=False) @torch.no_grad() def forward(self) -> tuple[torch.Tensor, torch.Tensor]: # As the feature map size is fixed, we can just return the pre-computed embeddings. return self.rope_embeddings_cos, self.rope_embeddings_sin def rotate_pairwise(x): """ pairwise rotation of the hidden dims of the input. Differerent from Llama Half-Tensor Rotation. 
This is an optimized version of the following more explicit implementation: ```python x_rotated = torch.zeros_like(x, dtype=x.dtype, device=x.device) x_rotated[..., ::2] = -x[..., 1::2] x_rotated[..., 1::2] = x[..., ::2] return x_rotated ``` """ x = x.view(*x.shape[:-1], -1, 2) x1, x2 = x.unbind(dim=-1) x = torch.stack((-x2, x1), dim=-1) return x.flatten(start_dim=-2) # TODO: This leads to ~1e-07 max diff and ~1e-09 avg diff for q_embed and k_embed from the original implementation, most likely due to the use of complex tensors in the original implementation. def apply_rotary_pos_emb_2d( q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, num_k_exclude_rope: int = 0, repeat_freqs_k: bool = False, ) -> tuple[torch.Tensor, torch.Tensor]: """ Apply rotary position embedding to query and key tensors for vision models. Follows the standard transformers library pattern. Args: q: Query tensor of shape (..., seq_len, head_dim) k: Key tensor of shape (..., seq_len, head_dim) cos: Cosine position embedding of shape (seq_len, head_dim) sin: Sine position embedding of shape (seq_len, head_dim) repeat_freqs_k: Whether to repeat frequencies for keys (for cross-attention) Returns: Rotated (q, k) tensors """ k_rot, k_pass = k[..., : k.shape[-2] - num_k_exclude_rope, :], k[..., k.shape[-2] - num_k_exclude_rope :, :] q_embed = q.float() # force upscale to float32 as in the original implementation q_embed = (q_embed * cos) + (rotate_pairwise(q_embed) * sin) if k_rot.shape[-2] == 0: # Handle case where keys might be empty due to dropout return q_embed.type_as(q), torch.cat([k_rot, k_pass], dim=-2) # Handle key tensor - may need to repeat frequencies if different sequence length if repeat_freqs_k and k_rot.shape[-2] != q.shape[-2]: # Repeat cos/sin to match key sequence length repeat_factor = k_rot.shape[-2] // q.shape[-2] cos_k = cos.repeat(1, 1, repeat_factor, 1) sin_k = sin.repeat(1, 1, repeat_factor, 1) else: cos_k = cos sin_k = sin # Apply rotary embedding to 
keys k_embed = k_rot.float() # force upscale to float32 as in the original implementation k_embed = (k_embed * cos_k) + (rotate_pairwise(k_embed) * sin_k) # Concatenate back to full shape k_embed = torch.cat([k_embed.type_as(k), k_pass], dim=-2) return q_embed.type_as(q), k_embed
Sam3TrackerVideoVisionRotaryEmbedding
python
yandexdataschool__Practical_RL
week04_approx_rl/dqn/replay_buffer.py
{ "start": 157, "end": 2401 }
class ____(object): def __init__(self, size): """Create Replay buffer. Parameters ---------- size: int Max number of transitions to store in the buffer. When the buffer overflows the old memories are dropped. """ self._storage = [] self._maxsize = size self._next_idx = 0 def __len__(self): return len(self._storage) def add(self, obs_t, action, reward, obs_tp1, done): data = (obs_t, action, reward, obs_tp1, done) if self._next_idx >= len(self._storage): self._storage.append(data) else: self._storage[self._next_idx] = data self._next_idx = (self._next_idx + 1) % self._maxsize def _encode_sample(self, idxes): obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], [] for i in idxes: data = self._storage[i] obs_t, action, reward, obs_tp1, done = data obses_t.append(np.array(obs_t, copy=False)) actions.append(np.array(action, copy=False)) rewards.append(reward) obses_tp1.append(np.array(obs_tp1, copy=False)) dones.append(done) return ( np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(dones), ) def sample(self, batch_size): """Sample a batch of experiences. Parameters ---------- batch_size: int How many transitions to sample. Returns ------- obs_batch: np.array batch of observations act_batch: np.array batch of actions executed given obs_batch rew_batch: np.array rewards received as results of executing act_batch next_obs_batch: np.array next set of observations seen after executing act_batch done_mask: np.array done_mask[i] = 1 if executing act_batch[i] resulted in the end of an episode and 0 otherwise. """ idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)] return self._encode_sample(idxes)
ReplayBuffer
python
python-markdown__markdown
markdown/htmlparser.py
{ "start": 4124, "end": 17216 }
class ____(htmlparser.HTMLParser): """ Extract raw HTML from text. The raw HTML is stored in the [`htmlStash`][markdown.util.HtmlStash] of the [`Markdown`][markdown.Markdown] instance passed to `md` and the remaining text is stored in `cleandoc` as a list of strings. """ def __init__(self, md: Markdown, *args, **kwargs): if 'convert_charrefs' not in kwargs: kwargs['convert_charrefs'] = False # Block tags that should contain no content (self closing) self.empty_tags = set(['hr']) self.lineno_start_cache = [0] self.override_comment_update = False # This calls self.reset super().__init__(*args, **kwargs) self.md = md def reset(self): """Reset this instance. Loses all unprocessed data.""" self.inraw = False self.intail = False self.stack: list[str] = [] # When `inraw==True`, stack contains a list of tags self._cache: list[str] = [] self.cleandoc: list[str] = [] self.lineno_start_cache = [0] super().reset() def close(self): """Handle any buffered data.""" super().close() if len(self.rawdata): # Temp fix for https://bugs.python.org/issue41989 # TODO: remove this when the bug is fixed in all supported Python versions. if self.convert_charrefs and not self.cdata_elem: # pragma: no cover self.handle_data(htmlparser.unescape(self.rawdata)) else: self.handle_data(self.rawdata) # Handle any unclosed tags. if len(self._cache): self.cleandoc.append(self.md.htmlStash.store(''.join(self._cache))) self._cache = [] @property def line_offset(self) -> int: """Returns char index in `self.rawdata` for the start of the current line. """ for ii in range(len(self.lineno_start_cache)-1, self.lineno-1): last_line_start_pos = self.lineno_start_cache[ii] lf_pos = self.rawdata.find('\n', last_line_start_pos) if lf_pos == -1: # No more newlines found. Use end of raw data as start of line beyond end. 
lf_pos = len(self.rawdata) self.lineno_start_cache.append(lf_pos+1) return self.lineno_start_cache[self.lineno-1] def at_line_start(self) -> bool: """ Returns True if current position is at start of line. Allows for up to three blank spaces at start of line. """ if self.offset == 0: return True if self.offset > 3: return False # Confirm up to first 3 chars are whitespace return self.rawdata[self.line_offset:self.line_offset + self.offset].strip() == '' def get_endtag_text(self, tag: str) -> str: """ Returns the text of the end tag. If it fails to extract the actual text from the raw data, it builds a closing tag with `tag`. """ # Attempt to extract actual tag from raw source text start = self.line_offset + self.offset m = htmlparser.endendtag.search(self.rawdata, start) if m: return self.rawdata[start:m.end()] else: # pragma: no cover # Failed to extract from raw data. Assume well formed and lowercase. return '</{}>'.format(tag) def handle_starttag(self, tag: str, attrs: Sequence[tuple[str, str]]): # Handle tags that should always be empty and do not specify a closing tag if tag in self.empty_tags: self.handle_startendtag(tag, attrs) return if self.md.is_block_level(tag) and (self.intail or (self.at_line_start() and not self.inraw)): # Started a new raw block. Prepare stack. self.inraw = True self.cleandoc.append('\n') text = self.get_starttag_text() if self.inraw: self.stack.append(tag) self._cache.append(text) else: self.cleandoc.append(text) if tag in self.CDATA_CONTENT_ELEMENTS: # This is presumably a standalone tag in a code span (see #1036). self.clear_cdata_mode() def handle_endtag(self, tag: str): text = self.get_endtag_text(tag) if self.inraw: self._cache.append(text) if tag in self.stack: # Remove tag from stack while self.stack: if self.stack.pop() == tag: break if len(self.stack) == 0: # End of raw block. if blank_line_re.match(self.rawdata[self.line_offset + self.offset + len(text):]): # Preserve blank line and end of raw block. 
self._cache.append('\n') else: # More content exists after `endtag`. self.intail = True # Reset stack. self.inraw = False self.cleandoc.append(self.md.htmlStash.store(''.join(self._cache))) # Insert blank line between this and next line. self.cleandoc.append('\n\n') self._cache = [] else: self.cleandoc.append(text) def handle_data(self, data: str): if self.intail and '\n' in data: self.intail = False if self.inraw: self._cache.append(data) else: self.cleandoc.append(data) def handle_empty_tag(self, data: str, is_block: bool): """ Handle empty tags (`<data>`). """ if self.inraw or self.intail: # Append this to the existing raw block self._cache.append(data) elif self.at_line_start() and is_block: # Handle this as a standalone raw block if blank_line_re.match(self.rawdata[self.line_offset + self.offset + len(data):]): # Preserve blank line after tag in raw block. data += '\n' else: # More content exists after tag. self.intail = True item = self.cleandoc[-1] if self.cleandoc else '' # If we only have one newline before block element, add another if not item.endswith('\n\n') and item.endswith('\n'): self.cleandoc.append('\n') self.cleandoc.append(self.md.htmlStash.store(data)) # Insert blank line between this and next line. 
self.cleandoc.append('\n\n') else: self.cleandoc.append(data) def handle_startendtag(self, tag: str, attrs): self.handle_empty_tag(self.get_starttag_text(), is_block=self.md.is_block_level(tag)) def handle_charref(self, name: str): self.handle_empty_tag('&#{};'.format(name), is_block=False) def handle_entityref(self, name: str): self.handle_empty_tag('&{};'.format(name), is_block=False) def handle_comment(self, data: str): # Check if the comment is unclosed, if so, we need to override position i = self.line_offset + self.offset + len(data) + 4 if self.rawdata[i:i + 3] != '-->': self.handle_data('<') self.override_comment_update = True return self.handle_empty_tag('<!--{}-->'.format(data), is_block=True) def updatepos(self, i: int, j: int) -> int: if self.override_comment_update: self.override_comment_update = False i = 0 j = 1 return super().updatepos(i, j) def handle_decl(self, data: str): self.handle_empty_tag('<!{}>'.format(data), is_block=True) def handle_pi(self, data: str): self.handle_empty_tag('<?{}?>'.format(data), is_block=True) def unknown_decl(self, data: str): end = ']]>' if data.startswith('CDATA[') else ']>' self.handle_empty_tag('<![{}{}'.format(data, end), is_block=True) def parse_pi(self, i: int) -> int: if self.at_line_start() or self.intail: return super().parse_pi(i) # This is not the beginning of a raw block so treat as plain data # and avoid consuming any tags which may follow (see #1066). 
self.handle_data('<?') return i + 2 if not hasattr(htmlparser, 'commentabruptclose'): # Internal -- parse comment, return length or -1 if not terminated # see https://html.spec.whatwg.org/multipage/parsing.html#comment-start-state def parse_comment(self, i, report=True): rawdata = self.rawdata assert rawdata.startswith('<!--', i), 'unexpected call to parse_comment()' match = commentclose.search(rawdata, i+4) if not match: match = commentabruptclose.match(rawdata, i+4) if not match: return -1 if report: j = match.start() self.handle_comment(rawdata[i+4: j]) return match.end() def parse_html_declaration(self, i: int) -> int: if self.at_line_start() or self.intail: if self.rawdata[i:i+3] == '<![' and not self.rawdata[i:i+9] == '<![CDATA[': # We have encountered the bug in #1534 (Python bug `gh-77057`). # Provide an override until we drop support for Python < 3.13. result = self.parse_bogus_comment(i) if result == -1: self.handle_data(self.rawdata[i:i + 1]) return i + 1 return result return super().parse_html_declaration(i) # This is not the beginning of a raw block so treat as plain data # and avoid consuming any tags which may follow (see #1066). self.handle_data('<!') return i + 2 def parse_bogus_comment(self, i: int, report: int = 0) -> int: # Override the default behavior so that bogus comments get passed # through unaltered by setting `report` to `0` (see #1425). pos = super().parse_bogus_comment(i, report) if pos == -1: # pragma: no cover return -1 self.handle_empty_tag(self.rawdata[i:pos], is_block=False) return pos # The rest has been copied from base class in standard lib to address #1036. # As `__startag_text` is private, all references to it must be in this subclass. # The last few lines of `parse_starttag` are reversed so that `handle_starttag` # can override `cdata_mode` in certain situations (in a code span). 
__starttag_text: str | None = None def get_starttag_text(self) -> str: """Return full source of start tag: `<...>`.""" return self.__starttag_text def parse_starttag(self, i: int) -> int: # pragma: no cover # Treat `</>` as normal data as it is not a real tag. if self.rawdata[i:i + 3] == '</>': self.handle_data(self.rawdata[i:i + 3]) return i + 3 self.__starttag_text = None endpos = self.check_for_whole_start_tag(i) if endpos < 0: self.handle_data(self.rawdata[i:i + 1]) return i + 1 rawdata = self.rawdata self.__starttag_text = rawdata[i:endpos] # Now parse the data between `i+1` and `j` into a tag and `attrs` attrs = [] match = htmlparser.tagfind_tolerant.match(rawdata, i+1) assert match, 'unexpected call to parse_starttag()' k = match.end() self.lasttag = tag = match.group(1).lower() while k < endpos: m = htmlparser.attrfind_tolerant.match(rawdata, k) if not m: break attrname, rest, attrvalue = m.group(1, 2, 3) if not rest: attrvalue = None elif attrvalue[:1] == '\'' == attrvalue[-1:] or \ attrvalue[:1] == '"' == attrvalue[-1:]: # noqa: E127 attrvalue = attrvalue[1:-1] if attrvalue: attrvalue = htmlparser.unescape(attrvalue) attrs.append((attrname.lower(), attrvalue)) k = m.end() end = rawdata[k:endpos].strip() if end not in (">", "/>"): lineno, offset = self.getpos() if "\n" in self.__starttag_text: lineno = lineno + self.__starttag_text.count("\n") offset = len(self.__starttag_text) \ - self.__starttag_text.rfind("\n") # noqa: E127 else: offset = offset + len(self.__starttag_text) self.handle_data(rawdata[i:endpos]) return endpos if end.endswith('/>'): # XHTML-style empty tag: `<span attr="value" />` self.handle_startendtag(tag, attrs) else: # *** set `cdata_mode` first so we can override it in `handle_starttag` (see #1036) *** if tag in self.CDATA_CONTENT_ELEMENTS: self.set_cdata_mode(tag) self.handle_starttag(tag, attrs) return endpos
HTMLExtractor
python
huggingface__transformers
src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py
{ "start": 1064, "end": 6323 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`RobertaPreLayerNormModel`]. It is used to instantiate a RoBERTa-PreLayerNorm model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RoBERTa-PreLayerNorm [andreasmadsen/efficient_mlm_m0.40](https://huggingface.co/andreasmadsen/efficient_mlm_m0.40) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the RoBERTa-PreLayerNorm model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`RobertaPreLayerNormModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. 
max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`RobertaPreLayerNormModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. is_decoder (`bool`, *optional*, defaults to `False`): Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. 
Examples: ```python >>> from transformers import RobertaPreLayerNormConfig, RobertaPreLayerNormModel >>> # Initializing a RoBERTa-PreLayerNorm configuration >>> configuration = RobertaPreLayerNormConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = RobertaPreLayerNormModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "roberta-prelayernorm" def __init__( self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, use_cache=True, classifier_dropout=None, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.classifier_dropout = classifier_dropout __all__ = ["RobertaPreLayerNormConfig"]
RobertaPreLayerNormConfig
python
numba__numba
numba/cuda/cudadecl.py
{ "start": 4280, "end": 4397 }
class ____(ConcreteTemplate): key = cuda.activemask cases = [signature(types.uint32)] @register
Cuda_activemask
python
django-haystack__django-haystack
test_haystack/test_app_using_appconfig/tests.py
{ "start": 70, "end": 346 }
class ____(TestCase): def test_index_collection(self): from haystack import connections unified_index = connections["default"].get_unified_index() models = unified_index.get_indexed_models() self.assertIn(MicroBlogPost, models)
AppConfigTests
python
pypa__warehouse
warehouse/organizations/models.py
{ "start": 30522, "end": 30687 }
class ____(str, enum.Enum): Owner = "Owner" # Granted "Administer" permissions. Maintainer = "Maintainer" # Granted "Upload" permissions.
TeamProjectRoleType
python
huggingface__transformers
src/transformers/models/luke/modeling_luke.py
{ "start": 2636, "end": 3617 }
class ____(BaseModelOutput): r""" entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`): Sequence of entity hidden-states at the output of the last layer of the model. entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each layer plus the initial entity embedding outputs. """ entity_last_hidden_state: Optional[torch.FloatTensor] = None entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None @dataclass @auto_docstring( custom_intro=""" Base class for model's outputs, with potential hidden states and attentions. """ )
BaseLukeModelOutput
python
dagster-io__dagster
examples/assets_smoke_test/assets_smoke_test_tests/test_smoke_pure_python_assets.py
{ "start": 368, "end": 916 }
class ____(InMemoryIOManager): def load_input(self, context): if context.asset_key not in context.step_context.job_def.asset_layer.executable_asset_keys: column_schema = context.upstream_output.definition_metadata["column_schema"] return empty_dataframe_from_column_schema(column_schema) else: return super().load_input(context) def test_smoke_all(): assets = load_assets_from_modules([pure_python_assets]) materialize(assets, resources={"io_manager": SmokeIOManager()})
SmokeIOManager
python
kamyu104__LeetCode-Solutions
Python/longest-substring-with-at-most-k-distinct-characters.py
{ "start": 761, "end": 1323 }
class ____(object): def lengthOfLongestSubstringKDistinct(self, s, k): """ :type s: str :type k: int :rtype: int """ counter = Counter() left, max_length = 0, 0 for right, char in enumerate(s): counter[char] += 1 while len(counter) > k: counter[s[left]] -= 1 if counter[s[left]] == 0: del counter[s[left]] left += 1 max_length = max(max_length, right-left+1) return max_length
Solution2
python
apache__airflow
providers/smtp/tests/unit/smtp/notifications/test_smtp.py
{ "start": 2028, "end": 2797 }
class ____: template: str @property def rendered(self) -> str: return self.template.replace(DAG_ID_TEMPLATE_STRING, TEST_DAG_ID).replace( TI_TEMPLATE_STRING, TEST_TASK_ID ) # DAG-based templates TEMPLATED_SENDER = TemplatedString(f"{DAG_ID_TEMPLATE_STRING}_{SENDER_EMAIL_SUFFIX}") TEMPLATED_RECEIVER = TemplatedString(f"{DAG_ID_TEMPLATE_STRING}_{RECEIVER_EMAIL_SUFFIX}") TEMPLATED_SUBJECT = TemplatedString(f"{TEST_SUBJECT} {DAG_ID_TEMPLATE_STRING}") TEMPLATED_BODY = TemplatedString(f"{TEST_BODY} {DAG_ID_TEMPLATE_STRING}") # Task-based templates TEMPLATED_TI_SUBJECT = TemplatedString(f"{TEST_SUBJECT} {TI_TEMPLATE_STRING}") TEMPLATED_TI_SENDER = TemplatedString(f"{TI_TEMPLATE_STRING}_{SENDER_EMAIL_SUFFIX}")
TemplatedString
python
PyCQA__pylint
tests/functional/g/generic_alias/generic_alias_related.py
{ "start": 615, "end": 720 }
class ____: def __init__(self): pass __class_getitem__ = lambda cls, x: None
ClsClassGetItem
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/pipelines/status.py
{ "start": 49, "end": 1635 }
class ____(graphene.Enum): """The status of run execution.""" QUEUED = "QUEUED" NOT_STARTED = "NOT_STARTED" MANAGED = "MANAGED" STARTING = "STARTING" STARTED = "STARTED" SUCCESS = "SUCCESS" FAILURE = "FAILURE" CANCELING = "CANCELING" CANCELED = "CANCELED" class Meta: name = "RunStatus" @property def description(self: "GrapheneRunStatus") -> str: if self == GrapheneRunStatus.QUEUED: return "Runs waiting to be launched by the Dagster Daemon." elif self == GrapheneRunStatus.NOT_STARTED: return "Runs that have been created, but not yet submitted for launch." elif self == GrapheneRunStatus.MANAGED: return "Runs that are managed outside of the Dagster control plane." elif self == GrapheneRunStatus.STARTING: return "Runs that have been launched, but execution has not yet started." elif self == GrapheneRunStatus.STARTED: return "Runs that have been launched and execution has started." elif self == GrapheneRunStatus.SUCCESS: return "Runs that have successfully completed." elif self == GrapheneRunStatus.FAILURE: return "Runs that have failed to complete." elif self == GrapheneRunStatus.CANCELING: return "Runs that are in-progress and pending to be canceled." elif self == GrapheneRunStatus.CANCELED: return "Runs that have been canceled before completion." else: check.failed(f"unhandled type {self}")
GrapheneRunStatus
python
has2k1__plotnine
plotnine/geoms/geom_errorbarh.py
{ "start": 461, "end": 2213 }
class ____(geom): """ Horizontal interval represented as an errorbar {usage} Parameters ---------- {common_parameters} height : float, default=0.5 Bar height as a fraction of the resolution of the data. """ DEFAULT_AES = { "alpha": 1, "color": "black", "linetype": "solid", "size": 0.5, } REQUIRED_AES = {"y", "xmin", "xmax"} DEFAULT_PARAMS = { "stat": "identity", "position": "identity", "na_rm": False, "height": 0.5, } draw_legend = staticmethod(geom_path.draw_legend) def setup_data(self, data: pd.DataFrame) -> pd.DataFrame: if "height" not in data: if self.params["height"]: data["height"] = self.params["height"] else: data["height"] = resolution(data["y"], False) * 0.9 data["ymin"] = data["y"] - data["height"] / 2 data["ymax"] = data["y"] + data["height"] / 2 del data["height"] return data @staticmethod def draw_group( data: pd.DataFrame, panel_params: panel_view, coord: coord, ax: Axes, params: dict[str, Any], ): f = np.hstack # create (two vertical bars) + horizontal bar bars = pd.DataFrame( { "y": f([data["ymin"], data["ymin"], data["y"]]), "yend": f([data["ymax"], data["ymax"], data["y"]]), "x": f([data["xmin"], data["xmax"], data["xmin"]]), "xend": f([data["xmin"], data["xmax"], data["xmax"]]), } ) copy_missing_columns(bars, data) geom_segment.draw_group(bars, panel_params, coord, ax, params)
geom_errorbarh
python
huggingface__transformers
src/transformers/models/idefics/vision.py
{ "start": 19053, "end": 21360 }
class ____(nn.Module): def __init__(self, config: IdeficsVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = IdeficsVisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = IdeficsVisionEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) # Adapted from transformers.models.clip.modeling_clip.CLIPVisionTransformer.forward def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = False, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, )
IdeficsVisionTransformer
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/dataflow.py
{ "start": 20959, "end": 30294 }
class ____(GoogleCloudBaseOperator): """ Starts a Dataflow Job with a Flex Template. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:DataflowStartFlexTemplateOperator` :param body: The request body. See: https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.locations.flexTemplates/launch#request-body :param location: The location of the Dataflow job (for example europe-west1) :param project_id: The ID of the GCP project that owns the job. :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform. :param drain_pipeline: Optional, set to True if want to stop streaming job by draining it instead of canceling during killing task instance. See: https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline :param cancel_timeout: How long (in seconds) operator should wait for the pipeline to be successfully cancelled when task is being killed. :param wait_until_finished: (Optional) If True, wait for the end of pipeline execution before exiting. If False, only submits job. If None, default behavior. The default behavior depends on the type of pipeline: * for the streaming pipeline, wait for jobs to start, * for the batch pipeline, wait for the jobs to complete. .. warning:: You cannot call ``PipelineResult.wait_until_finish`` method in your pipeline code for the operator to work properly. i. e. you must use asynchronous execution. Otherwise, your pipeline will always wait until finished. For more information, look at: `Asynchronous execution <https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#python_10>`__ The process of starting the Dataflow job in Airflow consists of two steps: * running a subprocess and reading the stderr/stderr log for the job id. * loop waiting for the end of the job ID from the previous step. This loop checks the status of the job. 
Step two is started just after step one has finished, so if you have wait_until_finished in your pipeline code, step two will not start until the process stops. When this process stops, steps two will run, but it will only execute one iteration as the job will be in a terminal state. If you in your pipeline do not call the wait_for_pipeline method but pass wait_until_finished=True to the operator, the second loop will wait for the job's terminal state. If you in your pipeline do not call the wait_for_pipeline method, and pass wait_until_finished=False to the operator, the second loop will check once is job not in terminal state and exit the loop. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). :param deferrable: Run operator in the deferrable mode. :param expected_terminal_state: The expected final status of the operator on which the corresponding Airflow task succeeds. When not specified, it will be determined by the hook. :param append_job_name: True if unique suffix has to be appended to job name. :param poll_sleep: The time in seconds to sleep between polling Google Cloud Platform for the dataflow job status while the job is in the JOB_STATE_RUNNING state. 
""" template_fields: Sequence[str] = ("body", "location", "project_id", "gcp_conn_id") operator_extra_links = (DataflowJobLink(),) def __init__( self, body: dict, location: str, project_id: str = PROVIDE_PROJECT_ID, gcp_conn_id: str = "google_cloud_default", drain_pipeline: bool = False, cancel_timeout: int | None = 10 * 60, wait_until_finished: bool | None = None, impersonation_chain: str | Sequence[str] | None = None, deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), append_job_name: bool = True, expected_terminal_state: str | None = None, poll_sleep: int = 10, *args, **kwargs, ) -> None: super().__init__(*args, **kwargs) self.body = body self.location = location self.project_id = project_id self.gcp_conn_id = gcp_conn_id self.drain_pipeline = drain_pipeline self.cancel_timeout = cancel_timeout self.wait_until_finished = wait_until_finished self.job: dict[str, str] | None = None self.impersonation_chain = impersonation_chain self.deferrable = deferrable self.expected_terminal_state = expected_terminal_state self.append_job_name = append_job_name self.poll_sleep = poll_sleep self._validate_deferrable_params() def _validate_deferrable_params(self): if self.deferrable and self.wait_until_finished: raise ValueError( "Conflict between deferrable and wait_until_finished parameters " "because it makes operator as blocking when it requires to be deferred. " "It should be True as deferrable parameter or True as wait_until_finished." 
) if self.deferrable and self.wait_until_finished is None: self.wait_until_finished = False @cached_property def hook(self) -> DataflowHook: hook = DataflowHook( gcp_conn_id=self.gcp_conn_id, drain_pipeline=self.drain_pipeline, cancel_timeout=self.cancel_timeout, wait_until_finished=self.wait_until_finished, impersonation_chain=self.impersonation_chain, expected_terminal_state=self.expected_terminal_state, ) return hook def execute(self, context: Context): if self.append_job_name: self._append_uuid_to_job_name() def set_current_job(current_job): self.job = current_job DataflowJobLink.persist( context=context, project_id=self.project_id, region=self.location, job_id=self.job.get("id") ) if not self.deferrable: self.job = self.hook.start_flex_template( body=self.body, location=self.location, project_id=self.project_id, on_new_job_callback=set_current_job, ) job_id = self.hook.extract_job_id(self.job) context["task_instance"].xcom_push(key="job_id", value=job_id) return self.job self.job = self.hook.launch_job_with_flex_template( body=self.body, location=self.location, project_id=self.project_id, ) job_id = self.hook.extract_job_id(self.job) DataflowJobLink.persist( context=context, project_id=self.project_id, region=self.location, job_id=job_id ) self.defer( trigger=TemplateJobStartTrigger( project_id=self.project_id, job_id=job_id, location=self.location, gcp_conn_id=self.gcp_conn_id, poll_sleep=self.poll_sleep, impersonation_chain=self.impersonation_chain, cancel_timeout=self.cancel_timeout, ), method_name=GOOGLE_DEFAULT_DEFERRABLE_METHOD_NAME, ) def _append_uuid_to_job_name(self): job_body = self.body.get("launch_parameter") or self.body.get("launchParameter") job_name = job_body.get("jobName") if job_name: job_name += f"-{uuid.uuid4()!s:.8}" job_body["jobName"] = job_name self.log.info("Job name was changed to %s", job_name) def execute_complete(self, context: Context, event: dict) -> dict[str, str]: """Execute after trigger finishes its work.""" if 
event["status"] in ("error", "stopped"): self.log.info("status: %s, msg: %s", event["status"], event["message"]) raise AirflowException(event["message"]) job_id = event["job_id"] self.log.info("Task %s completed with response %s", job_id, event["message"]) context["task_instance"].xcom_push(key="job_id", value=job_id) job = self.hook.get_job(job_id=job_id, project_id=self.project_id, location=self.location) return job def on_kill(self) -> None: self.log.info("On kill.") if self.job is not None: self.hook.cancel_job( job_id=self.job.get("id"), project_id=self.job.get("projectId"), location=self.job.get("location"), )
DataflowStartFlexTemplateOperator
python
pytorch__pytorch
torch/_inductor/scheduler.py
{ "start": 75943, "end": 88340 }
class ____(FusedSchedulerNode): """ This is a schedular node that consists of a set of scheduler nodes that has no data dependencies among them and can be executed in parallel. """ def get_consumer_subnode_for( self, producer: BaseSchedulerNode ) -> Optional[BaseSchedulerNode]: for buf in producer.get_outputs(): if buf.get_name() in self.read_to_node: return self.read_to_node[buf.get_name()] return None def get_producer_subnode_for( self, consumer: BaseSchedulerNode ) -> Optional[BaseSchedulerNode]: producers = OrderedSet[BaseSchedulerNode]() for rd in consumer.read_writes.reads: if rd.name not in self.scheduler.name_to_buf: continue node_name = self.scheduler.name_to_buf[rd.name].defining_op_name() if node_name in self.name_to_node: producers.add(self.name_to_node[node_name]) # Don't permit fusion if there are multiple subnodes # that this consumer reads from if len(producers) == 1: return next(iter(producers)) else: return None @classmethod def can_fuse(cls, producer: BaseSchedulerNode, consumer: BaseSchedulerNode) -> bool: why = WhyNoFuse(producer, consumer) if producer.is_foreach() and consumer.is_foreach(): producer = typing.cast(ForeachKernelSchedulerNode, producer) consumer = typing.cast(ForeachKernelSchedulerNode, consumer) foreach_match = len(producer.snodes) == len(consumer.snodes) if not foreach_match: why("foreach do not have same length") return foreach_match and all( producer.scheduler.can_fuse(l, r) for l, r in zip(producer.snodes, consumer.snodes) ) elif consumer.is_foreach(): if producer.is_reduction(): why( "candidate producer is a reduction, foreach ops cannot be fused with reductions currently" ) return False consumer = typing.cast(ForeachKernelSchedulerNode, consumer) consumer_subnode = consumer.get_consumer_subnode_for(producer) if consumer_subnode is not None: return consumer.scheduler.can_fuse(producer, consumer_subnode) why("candidate producer is not dep of any foreach consumer") return False elif producer.is_foreach(): if 
consumer.is_reduction(): why( "candidate consumer is a reduction, foreach ops cannot be fused with reductions currently" ) return False producer = typing.cast(ForeachKernelSchedulerNode, producer) producer_subnode = producer.get_producer_subnode_for(consumer) if producer_subnode is not None: return producer.scheduler.can_fuse(producer_subnode, consumer) why("candidate consumer has no dep in any foreach producer") return False raise AssertionError( "At least one node passed to ForeachKernelSchedulerNode.can_fuse should be a foreach node" ) @classmethod def fuse( cls, producer: BaseSchedulerNode, consumer: BaseSchedulerNode ) -> ForeachKernelSchedulerNode: assert producer.is_foreach() or consumer.is_foreach() if producer.is_foreach(): producer = typing.cast(ForeachKernelSchedulerNode, producer) use_custom_partition_algo = producer.use_custom_partition_algo enable_autotune = producer.enable_autotune else: consumer = typing.cast(ForeachKernelSchedulerNode, consumer) use_custom_partition_algo = consumer.use_custom_partition_algo enable_autotune = consumer.enable_autotune prev_node_1 = None prev_node_2 = None fused_nodes: list[BaseSchedulerNode] if producer.is_foreach() and consumer.is_foreach(): producer = typing.cast(ForeachKernelSchedulerNode, producer) consumer = typing.cast(ForeachKernelSchedulerNode, consumer) fused_nodes = [ FusedSchedulerNode.fuse(l, r) for l, r in zip(producer.snodes, consumer.snodes) ] elif producer.is_foreach(): producer = typing.cast(ForeachKernelSchedulerNode, producer) producer_subnode = producer.get_producer_subnode_for(consumer) fused_nodes = [] prev_node_1 = producer prev_node_2 = None for node in producer.snodes: if node is producer_subnode: new_node = FusedSchedulerNode.fuse(node, consumer) prev_node_2 = new_node fused_nodes.append(new_node) else: fused_nodes.append(node) elif consumer.is_foreach(): consumer = typing.cast(ForeachKernelSchedulerNode, consumer) consumer_subnode = consumer.get_consumer_subnode_for(producer) fused_nodes = 
[] prev_node_1 = consumer prev_node_2 = None for node in consumer.snodes: if node is consumer_subnode: new_node = FusedSchedulerNode.fuse(producer, node) prev_node_2 = new_node fused_nodes.append(new_node) else: fused_nodes.append(node) else: raise AssertionError( "At least one node passed to ForeachKernelSchedulerNode.fuse should be a foreach node" ) return cls( producer.scheduler, fused_nodes, use_custom_partition_algo=use_custom_partition_algo, prev_node_1=prev_node_1, prev_node_2=prev_node_2, enable_autotune=enable_autotune, ) def __init__( self, scheduler: Scheduler, snodes: list[BaseSchedulerNode], use_custom_partition_algo: bool, prev_node_1: Optional[BaseSchedulerNode] = None, prev_node_2: Optional[BaseSchedulerNode] = None, enable_autotune: bool = False, ) -> None: self.read_to_node = {} self.name_to_node = {} if prev_node_1 is None or prev_node_2 is None: super().__init__(scheduler, snodes) for node in snodes: for read in node.read_writes.reads: self.read_to_node[read.name] = node for name in node.get_operation_names(): self.name_to_node[name] = node else: self.scheduler = scheduler self.snodes = snodes self.node = None self.users: list[NodeUser] = [] self.set_read_writes( dependencies.ReadWrites.merge_list( [prev_node_1.read_writes, prev_node_2.read_writes] ) ) self.unmet_dependencies = ( OrderedSet( dep for dep in OrderedSet.union( prev_node_1.unmet_dependencies, prev_node_2.unmet_dependencies ) if dep.name not in self.get_buffer_names() ) - self.read_writes.writes ) self.min_order = min([prev_node_1.min_order, prev_node_2.min_order]) self.max_order = max([prev_node_1.max_order, prev_node_2.max_order]) if prev_node_1.is_foreach(): assert isinstance(prev_node_1, ForeachKernelSchedulerNode) foreach_node, other_node = prev_node_1, prev_node_2 else: assert isinstance(prev_node_2, ForeachKernelSchedulerNode) foreach_node, other_node = prev_node_2, prev_node_1 self.ancestors = foreach_node.ancestors self.ancestors.update(other_node.ancestors) 
self.name_to_node = foreach_node.name_to_node for name in other_node.get_operation_names(): self.name_to_node[name] = other_node self.outputs_by_name: dict[str, SchedulerBuffer] = { k: v for snode in self.snodes for k, v in snode.outputs_by_name.items() } self.use_custom_partition_algo = use_custom_partition_algo device = snodes[0].get_device() assert device self.group = (device, ((sympy.Expr("combo_kernel"),),)) self.origins = OrderedSet[torch.fx.Node]() self.enable_autotune = enable_autotune @classmethod def combinable_nodes( cls, nodes: list[BaseSchedulerNode] ) -> list[BaseSchedulerNode]: extern = [x for x in nodes if isinstance(x, ExternKernelSchedulerNode)] if extern: log.debug( "ComboKernels: %d external nodes are filtered %s", len(extern), [node.node.get_origins() for node in extern if node.node is not None], ) filtered_nodes = [ x for x in nodes if not isinstance(x, (NopKernelSchedulerNode, ExternKernelSchedulerNode)) ] foreach_nodes = [ x for x in filtered_nodes if isinstance(x, ForeachKernelSchedulerNode) ] if foreach_nodes: log.debug("ComboKernels: %d foreach nodes are filtered", len(foreach_nodes)) filtered_nodes = [ x for x in filtered_nodes if not isinstance(x, ForeachKernelSchedulerNode) ] template_nodes = [x for x in filtered_nodes if x.is_template()] if template_nodes: log.debug( "ComboKernels: %d template nodes are filtered: %s", len(template_nodes), template_nodes, ) filtered_nodes = [x for x in filtered_nodes if x not in template_nodes] return filtered_nodes @staticmethod def _default_group_nodes_for_combo_kernels( scheduler: Scheduler, ) -> list[list[BaseSchedulerNode]]: """ Returns a list of lists of nodes that are to be grouped together. 
""" sorted_nodes = scheduler._topological_sort_nodes() grouped_nodes = [] max_num_nodes = 8 for nodes in sorted_nodes: grouped_nodes.extend( [ nodes[i : i + max_num_nodes] for i in range(0, len(nodes), max_num_nodes) ] ) return grouped_nodes group_algorithm_for_combo_kernels: Callable[ [Scheduler], list[list[BaseSchedulerNode]] ] = _default_group_nodes_for_combo_kernels @staticmethod def set_group_algorithm_for_combo_kernels( custom_group_algorithm: Callable[[Scheduler], list[list[BaseSchedulerNode]]], ) -> None: ForeachKernelSchedulerNode.group_algorithm_for_combo_kernels = ( custom_group_algorithm ) @staticmethod def group_nodes_for_combo_kernels( scheduler: Scheduler, ) -> list[list[BaseSchedulerNode]]: return ForeachKernelSchedulerNode.group_algorithm_for_combo_kernels(scheduler) def mark_run(self) -> None: raise NotImplementedError def codegen(self) -> None: raise NotImplementedError def is_foreach(self) -> bool: return True def get_subkernel_nodes(self) -> list[BaseSchedulerNode]: """Returns a list of nodes which comprise the combo kernel. These nodes may be vertically fused.""" return list(self.snodes) def get_nodes(self) -> Sequence[BaseSchedulerNode]: """Returns all nodes contained in this kernel, unpacking fused nodes into their constituent scheduler nodes.""" return list(itertools.chain.from_iterable(x.get_nodes() for x in self.snodes)) def get_first_name(self) -> str: return self.snodes[0].get_first_name() def prune_redundant_deps( self, name_to_fused_node: dict[str, BaseSchedulerNode] ) -> None: _prune_redundant_deps(self, name_to_fused_node, self.scheduler.name_to_buf) for node in self.snodes: node.prune_redundant_deps(name_to_fused_node)
ForeachKernelSchedulerNode
python
ZoranPandovski__al-go-rithms
cryptography/DES/des.py
{ "start": 4889, "end": 9441 }
class ____(): def __init__(self): self.password = None self.text = None self.keys = list() def run(self, key, text, action=ENCRYPT, padding=False): if len(key) < 8: raise ("Key Should be 8 bytes long") elif len(key) > 8: key = key[:8] #If key size is above 8bytes, cut to be 8bytes long self.password = key self.text = text if padding and action==ENCRYPT: self.addPadding() elif len(self.text) % 8 != 0:#If not padding specified data size must be multiple of 8 bytes raise("Data size should be multiple of 8") self.generatekeys() #Generate all the keys text_blocks = nsplit(self.text, 8) #Split the text in blocks of 8 bytes so 64 bits result = list() for block in text_blocks:#Loop over all the blocks of data block = string_to_bit_array(block)#Convert the block in bit array block = self.permut(block,PI)#Apply the initial permutation g, d = nsplit(block, 32) #g(LEFT), d(RIGHT) tmp = None for i in range(16): #Do the 16 rounds d_e = self.expand(d, E) #Expand d to match Ki size (48bits) if action == ENCRYPT: tmp = self.xor(self.keys[i], d_e)#If encrypt use Ki else: tmp = self.xor(self.keys[15-i], d_e)#If decrypt start by the last key tmp = self.substitute(tmp) #Method that will apply the SBOXes tmp = self.permut(tmp, P) tmp = self.xor(g, tmp) g = d d = tmp result += self.permut(d+g, PI_1) #Do the last permut and append the result to result final_res = bit_array_to_string(result) if padding and action==DECRYPT: return self.removePadding(final_res) #Remove the padding if decrypt and padding is true else: return final_res #Return the final string of data ciphered/deciphered def substitute(self, d_e):#Substitute bytes using SBOX subblocks = nsplit(d_e, 6)#Split bit array into sublist of 6 bits result = list() for i in range(len(subblocks)): #For all the sublists block = subblocks[i] row = int(str(block[0])+str(block[5]),2)#Get the row with the first and last bit column = int(''.join([str(x) for x in block[1:][:-1]]),2) #Column is the 2,3,4,5th bits val = S_BOX[i][row][column] 
#Take the value in the SBOX appropriated for the round (i) bin = binvalue(val, 4)#Convert the value to binary result += [int(x) for x in bin]#And append it to the resulting list return result def permut(self, block, table):#Permut the given block using the given table (so generic method) return [block[x-1] for x in table] def expand(self, block, table):#Do the exact same thing than permut but for more clarity has been renamed return [block[x-1] for x in table] def xor(self, t1, t2):#Apply a xor and return the resulting list return [x^y for x,y in zip(t1,t2)] def generatekeys(self):#Algorithm that generates all the keys self.keys = [] key = string_to_bit_array(self.password) key = self.permut(key, CP_1) #Apply the initial permut on the key g, d = nsplit(key, 28) #Split it in to (g->LEFT),(d->RIGHT) for i in range(16):#Apply the 16 rounds g, d = self.shift(g, d, SHIFT[i]) #Apply the shift associated with the round (not always 1) tmp = g + d #Merge them self.keys.append(self.permut(tmp, CP_2)) #Apply the permut to get the Ki def shift(self, g, d, n): #Shift a list of the given value return g[n:] + g[:n], d[n:] + d[:n] def addPadding(self):#Add padding to the datas using PKCS5 spec. pad_len = 8 - (len(self.text) % 8) self.text += pad_len * chr(pad_len) def removePadding(self, data):#Remove the padding of the plain text (it assume there is padding) pad_len = ord(data[-1]) return data[:-pad_len] def encrypt(self, key, text, padding=False): return self.run(key, text, ENCRYPT, padding) def decrypt(self, key, text, padding=False): return self.run(key, text, DECRYPT, padding) if __name__ == '__main__': key = "secret_k" text= "Hello wo" d = des() r = d.encrypt(key,text) r2 = d.decrypt(key,r) print("Ciphered: %r" % r) print("Deciphered: ", r2)
des
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/translate.py
{ "start": 51296, "end": 59833 }
class ____(GoogleCloudBaseOperator): """ Translate documents provided via input and output configurations. Up to 10 target languages per operation supported. Wraps the Google cloud Translate Text (Advanced) functionality. See https://cloud.google.com/translate/docs/advanced/batch-translation. For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:TranslateDocumentBatchOperator`. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param source_language_code: Optional. The ISO-639 language code of the input text if known. If the source language isn't specified, the API attempts to identify the source language automatically and returns the source language within the response. :param target_language_codes: Required. The ISO-639 language code to use for translation of the input document. Specify up to 10 language codes here. :param location: Optional. Project or location to make a call. Must refer to a caller's project. If not specified, 'global' is used. Non-global location is required for requests using AutoML models or custom glossaries. Models and glossaries must be within the same region (have the same location-id). :param input_configs: Input configurations. The total number of files matched should be <= 100. The total content size to translate should be <= 100M Unicode codepoints. The files must use UTF-8 encoding. :param output_config: Output configuration. If 2 input configs match to the same file (that is, same input path), no output for duplicate inputs will be generated. :param format_conversions: Optional. The file format conversion map that is applied to all input files. The map key is the original mime_type. The map value is the target mime_type of translated documents. 
Supported file format conversion includes: - ``application/pdf`` to ``application/vnd.openxmlformats-officedocument.wordprocessingml.document`` If nothing specified, output files will be in the same format as the original file. :param customized_attribution: Optional. This flag is to support user customized attribution. If not provided, the default is ``Machine Translated by Google``. Customized attribution should follow rules in https://cloud.google.com/translate/attribution#attribution_and_logos :param enable_shadow_removal_native_pdf: Optional. If true, use the text removal server to remove the shadow text on background image for native PDF translation. Shadow removal feature can only be enabled when both ``is_translate_native_pdf_only``, ``pdf_native_only`` are False. :param enable_rotation_correction: Optional. If true, enable auto rotation correction in DVS. :param models: Optional. The models to use for translation. Map's key is target language code. Map's value is the model name. Value can be a built-in general model, or an AutoML Translation model. The value format depends on model type: - AutoML Translation models: ``projects/{project-number-or-id}/locations/{location-id}/models/{model-id}`` - General (built-in) models: ``projects/{project-number-or-id}/locations/{location-id}/models/general/nmt``, If the map is empty or a specific model is not requested for a language pair, then default google model (NMT) is used. :param glossaries: Glossaries to be applied. It's keyed by target language code. :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. :param gcp_conn_id: The connection ID to use connecting to Google Cloud. 
:param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). """ operator_extra_links = (TranslateResultByOutputConfigLink(),) template_fields: Sequence[str] = ( "input_configs", "output_config", "target_language_codes", "source_language_code", "models", "glossaries", "gcp_conn_id", "impersonation_chain", ) def __init__( self, *, project_id: str = PROVIDE_PROJECT_ID, source_language_code: str, target_language_codes: MutableSequence[str] | None = None, location: str | None = None, input_configs: MutableSequence[BatchDocumentInputConfig | dict], output_config: BatchDocumentOutputConfig | dict, customized_attribution: str | None = None, format_conversions: MutableMapping[str, str] | None = None, enable_shadow_removal_native_pdf: bool = False, enable_rotation_correction: bool = False, models: MutableMapping[str, str] | None = None, glossaries: MutableMapping[str, TranslateTextGlossaryConfig] | None = None, metadata: Sequence[tuple[str, str]] = (), timeout: float | _MethodDefault = DEFAULT, retry: Retry | _MethodDefault | None = DEFAULT, gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.project_id = project_id self.location = location self.target_language_codes = target_language_codes self.source_language_code = source_language_code self.input_configs = input_configs self.output_config = output_config self.customized_attribution = customized_attribution 
self.format_conversions = format_conversions self.enable_shadow_removal_native_pdf = enable_shadow_removal_native_pdf self.enable_rotation_correction = enable_rotation_correction self.models = models self.glossaries = glossaries self.metadata = metadata self.timeout = timeout self.retry = retry self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context: Context) -> dict: hook = TranslateHook( gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain, ) try: batch_document_translate_operation = hook.batch_translate_document( project_id=self.project_id, location=self.location, target_language_codes=self.target_language_codes, source_language_code=self.source_language_code, input_configs=self.input_configs, output_config=self.output_config, customized_attribution=self.customized_attribution, format_conversions=self.format_conversions, enable_shadow_removal_native_pdf=self.enable_shadow_removal_native_pdf, enable_rotation_correction=self.enable_rotation_correction, models=self.models, glossaries=self.glossaries, metadata=self.metadata, timeout=self.timeout, retry=self.retry, ) except GoogleAPICallError as e: self.log.error("An error occurred executing batch_translate_document method: \n%s", e) raise AirflowException(e) self.log.info("Batch document translation job started.") TranslateResultByOutputConfigLink.persist( context=context, project_id=self.project_id or hook.project_id, output_config=self.output_config, ) result = hook.wait_for_operation_result(batch_document_translate_operation) self.log.info("Batch document translation job finished") return cast("dict", type(result).to_dict(result))
TranslateDocumentBatchOperator
python
pdm-project__pdm
src/pdm/cli/options.py
{ "start": 1115, "end": 1723 }
class ____(argparse.Action): def __init__(self, *args: Any, callback: ActionCallback, **kwargs: Any) -> None: super().__init__(*args, **kwargs) self.callback = callback def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Any, option_string: str | None = None, ) -> None: if not hasattr(namespace, "callbacks"): namespace.callbacks = [] callback = partial(self.callback, values=values, option_string=option_string) namespace.callbacks.append(callback)
CallbackAction
python
numba__numba
numba/parfors/parfor.py
{ "start": 124878, "end": 127198 }
class ____(ParforPassStates): """ParforPass class is responsible for converting NumPy calls in Numba intermediate representation to Parfors, which will lower into either sequential or parallel loops during lowering stage. """ def _pre_run(self): # run array analysis, a pre-requisite for parfor translation self.array_analysis.run(self.func_ir.blocks) # NOTE: Prepare _the_max_label. See #6102 ir_utils._the_max_label.update( ir_utils.find_max_label(self.func_ir.blocks)) def run(self): """run parfor conversion pass: replace Numpy calls with Parfors when possible and optimize the IR.""" self._pre_run() # run stencil translation to parfor if self.options.stencil: stencil_pass = StencilPass(self.func_ir, self.typemap, self.calltypes, self.array_analysis, self.typingctx, self.targetctx, self.flags) stencil_pass.run() if self.options.setitem: ConvertSetItemPass(self).run(self.func_ir.blocks) if self.options.numpy: ConvertNumpyPass(self).run(self.func_ir.blocks) if self.options.reduction: ConvertReducePass(self).run(self.func_ir.blocks) if self.options.prange: ConvertLoopPass(self).run(self.func_ir.blocks) if self.options.inplace_binop: ConvertInplaceBinop(self).run(self.func_ir.blocks) # setup diagnostics now parfors are found self.diagnostics.setup(self.func_ir, self.options.fusion) dprint_func_ir(self.func_ir, "after parfor pass") def _find_mask(self, arr_def): """check if an array is of B[...M...], where M is a boolean array, and other indices (if available) are ints. If found, return B, M, M's type, and a tuple representing mask indices. Otherwise, raise GuardException. """ return _find_mask(self.typemap, self.func_ir, arr_def) def _mk_parfor_loops(self, size_vars, scope, loc): """ Create loop index variables and build LoopNest objects for a parfor. """ return _mk_parfor_loops(self.typemap, size_vars, scope, loc)
ParforPass
python
kamyu104__LeetCode-Solutions
Python/zuma-game.py
{ "start": 156, "end": 1714 }
class ____(object): def findMinStep(self, board, hand): """ :type board: str :type hand: str :rtype: int """ def shrink(s): # Time: O(n^2), Space: O(1) while True: i = 0 for start in xrange(len(s)): while i < len(s) and s[start] == s[i]: i += 1 if i-start >= 3: s = s[0:start]+s[i:] break else: break return s def findMinStepHelper(board, hand, lookup): if not board: return 0 if not hand: return float("inf") if tuple(hand) in lookup[tuple(board)]: return lookup[tuple(board)][tuple(hand)] result = float("inf") for i in xrange(len(hand)): for j in xrange(len(board)+1): next_board = shrink(board[0:j] + hand[i:i+1] + board[j:]) next_hand = hand[0:i] + hand[i+1:] result = min(result, findMinStepHelper(next_board, next_hand, lookup) + 1) lookup[tuple(board)][tuple(hand)] = result return result lookup = collections.defaultdict(dict) board, hand = list(board), list(hand) result = findMinStepHelper(board, hand, lookup) return -1 if result == float("inf") else result # Time: O((b+h) * h!*(b+h-1)!/(b-1)!) # Space: O((b+h) * h!*(b+h-1)!/(b-1)!) import collections # brute force solution
Solution
python
ansible__ansible
lib/ansible/_internal/_errors/_captured.py
{ "start": 3842, "end": 4075 }
class ____(AnsibleResultCapturedError): """An exception representing error detail sourced directly by an action in its result dictionary.""" _default_message = 'Action failed.' context = 'action'
AnsibleActionCapturedError
python
walkccc__LeetCode
solutions/2540. Minimum Common Value/2540.py
{ "start": 0, "end": 324 }
class ____: def getCommon(self, nums1: list[int], nums2: list[int]) -> int: i = 0 # nums1's index j = 0 # nums2's index while i < len(nums1) and j < len(nums2): if nums1[i] == nums2[j]: return nums1[i] if nums1[i] < nums2[j]: i += 1 else: j += 1 return -1
Solution
python
astropy__astropy
astropy/modeling/tests/test_models.py
{ "start": 41493, "end": 41838 }
class ____(ModelDefault): def _calculate_separability_matrix(self): return np.array([[0]]) def test_custom_separability_matrix(): original = separability_matrix(ModelDefault(slope=1, intercept=2)) assert original.all() custom = separability_matrix(ModelCustom(slope=1, intercept=2)) assert not custom.any()
ModelCustom
python
spack__spack
lib/spack/spack/vendor/pyrsistent/_checked_types.py
{ "start": 4941, "end": 5487 }
class ____(ABCMeta): def __new__(mcs, name, bases, dct): _store_types(dct, bases, '_checked_types', '__type__') store_invariants(dct, bases, '_checked_invariants', '__invariant__') def default_serializer(self, _, value): if isinstance(value, CheckedType): return value.serialize() return value dct.setdefault('__serializer__', default_serializer) dct['__slots__'] = () return super(_CheckedTypeMeta, mcs).__new__(mcs, name, bases, dct)
_CheckedTypeMeta
python
explosion__spaCy
spacy/lang/ky/__init__.py
{ "start": 215, "end": 394 }
class ____(BaseDefaults): tokenizer_exceptions = TOKENIZER_EXCEPTIONS infixes = TOKENIZER_INFIXES lex_attr_getters = LEX_ATTRS stop_words = STOP_WORDS
KyrgyzDefaults
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/ruff/RUF023.py
{ "start": 6882, "end": 7757 }
class ____: __slots__ = (9, 8, 7) __slots__ = ( # This is just an empty tuple, # but, # it's very well ) # documented # We don't deduplicate elements; # this just ensures that duplicate elements aren't unnecessarily # reordered by an autofix: __slots__ = ( "duplicate_element", # comment1 "duplicate_element", # comment3 "duplicate_element", # comment2 "duplicate_element", # comment0 ) __slots__ = "foo", "an" "implicitly_concatenated_second_item", not_a_string_literal __slots__ =[ [] ] __slots__ = [ () ] __slots__ = ( () ) __slots__ = ( [] ) __slots__ = ( (), ) __slots__ = ( [], ) __slots__ = ( "foo", [], "bar" ) __slots__ = [ "foo", (), "bar" ]
Klass6
python
PyCQA__bandit
tests/unit/core/test_manager.py
{ "start": 301, "end": 14256 }
class ____(testtools.TestCase): def _get_issue_instance( self, sev=constants.MEDIUM, cwe=issue.Cwe.MULTIPLE_BINDS, conf=constants.MEDIUM, ): new_issue = issue.Issue(sev, cwe, conf, "Test issue") new_issue.fname = "code.py" new_issue.test = "bandit_plugin" new_issue.lineno = 1 return new_issue def setUp(self): super().setUp() self.profile = {} self.profile["include"] = { "any_other_function_with_shell_equals_true", "assert_used", } self.config = config.BanditConfig() self.manager = manager.BanditManager( config=self.config, agg_type="file", debug=False, verbose=False ) def test_create_manager(self): # make sure we can create a manager self.assertEqual(False, self.manager.debug) self.assertEqual(False, self.manager.verbose) self.assertEqual("file", self.manager.agg_type) def test_create_manager_with_profile(self): # make sure we can create a manager m = manager.BanditManager( config=self.config, agg_type="file", debug=False, verbose=False, profile=self.profile, ) self.assertEqual(False, m.debug) self.assertEqual(False, m.verbose) self.assertEqual("file", m.agg_type) def test_matches_globlist(self): self.assertTrue(manager._matches_glob_list("test", ["*tes*"])) self.assertFalse(manager._matches_glob_list("test", ["*fes*"])) def test_is_file_included(self): a = manager._is_file_included( path="a.py", included_globs=["*.py"], excluded_path_strings=[], enforce_glob=True, ) b = manager._is_file_included( path="a.dd", included_globs=["*.py"], excluded_path_strings=[], enforce_glob=False, ) c = manager._is_file_included( path="a.py", included_globs=["*.py"], excluded_path_strings=["a.py"], enforce_glob=True, ) d = manager._is_file_included( path="a.dd", included_globs=["*.py"], excluded_path_strings=[], enforce_glob=True, ) e = manager._is_file_included( path="x_a.py", included_globs=["*.py"], excluded_path_strings=["x_*.py"], enforce_glob=True, ) f = manager._is_file_included( path="x.py", included_globs=["*.py"], excluded_path_strings=["x_*.py"], enforce_glob=True, ) 
self.assertTrue(a) self.assertTrue(b) self.assertFalse(c) self.assertFalse(d) self.assertFalse(e) self.assertTrue(f) @mock.patch("os.walk") def test_get_files_from_dir(self, os_walk): os_walk.return_value = [ ("/", ("a"), ()), ("/a", (), ("a.py", "b.py", "c.ww")), ] inc, exc = manager._get_files_from_dir( files_dir="", included_globs=["*.py"], excluded_path_strings=None ) self.assertEqual({"/a/c.ww"}, exc) self.assertEqual({"/a/a.py", "/a/b.py"}, inc) def test_populate_baseline_success(self): # Test populate_baseline with valid JSON baseline_data = """{ "results": [ { "code": "test code", "filename": "example_file.py", "issue_severity": "low", "issue_cwe": { "id": 605, "link": "%s" }, "issue_confidence": "low", "issue_text": "test issue", "test_name": "some_test", "test_id": "x", "line_number": "n", "line_range": "n-m" } ] } """ % ( "https://cwe.mitre.org/data/definitions/605.html" ) issue_dictionary = { "code": "test code", "filename": "example_file.py", "issue_severity": "low", "issue_cwe": issue.Cwe(issue.Cwe.MULTIPLE_BINDS).as_dict(), "issue_confidence": "low", "issue_text": "test issue", "test_name": "some_test", "test_id": "x", "line_number": "n", "line_range": "n-m", } baseline_items = [issue.issue_from_dict(issue_dictionary)] self.manager.populate_baseline(baseline_data) self.assertEqual(baseline_items, self.manager.baseline) @mock.patch("logging.Logger.warning") def test_populate_baseline_invalid_json(self, mock_logger_warning): # Test populate_baseline with invalid JSON content baseline_data = """{"data": "bad"}""" self.manager.populate_baseline(baseline_data) # Default value for manager.baseline is [] self.assertEqual([], self.manager.baseline) self.assertTrue(mock_logger_warning.called) def test_results_count(self): levels = [constants.LOW, constants.MEDIUM, constants.HIGH] self.manager.results = [ issue.Issue( severity=level, cwe=issue.Cwe.MULTIPLE_BINDS, confidence=level ) for level in levels ] r = [ self.manager.results_count(sev_filter=level, 
conf_filter=level) for level in levels ] self.assertEqual([3, 2, 1], r) def test_output_results_invalid_format(self): # Test that output_results succeeds given an invalid format temp_directory = self.useFixture(fixtures.TempDir()).path lines = 5 sev_level = constants.LOW conf_level = constants.LOW output_filename = os.path.join(temp_directory, "_temp_output") output_format = "invalid" with open(output_filename, "w") as tmp_file: self.manager.output_results( lines, sev_level, conf_level, tmp_file, output_format ) self.assertTrue(os.path.isfile(output_filename)) def test_output_results_valid_format(self): # Test that output_results succeeds given a valid format temp_directory = self.useFixture(fixtures.TempDir()).path lines = 5 sev_level = constants.LOW conf_level = constants.LOW output_filename = os.path.join(temp_directory, "_temp_output.txt") output_format = "txt" with open(output_filename, "w") as tmp_file: self.manager.output_results( lines, sev_level, conf_level, tmp_file, output_format ) self.assertTrue(os.path.isfile(output_filename)) @mock.patch("os.path.isdir") def test_discover_files_recurse_skip(self, isdir): isdir.return_value = True self.manager.discover_files(["thing"], False) self.assertEqual([], self.manager.files_list) self.assertEqual([], self.manager.excluded_files) @mock.patch("os.path.isdir") def test_discover_files_recurse_files(self, isdir): isdir.return_value = True with mock.patch.object(manager, "_get_files_from_dir") as m: m.return_value = ({"files"}, {"excluded"}) self.manager.discover_files(["thing"], True) self.assertEqual(["files"], self.manager.files_list) self.assertEqual(["excluded"], self.manager.excluded_files) @mock.patch("os.path.isdir") def test_discover_files_exclude(self, isdir): isdir.return_value = False with mock.patch.object(manager, "_is_file_included") as m: m.return_value = False self.manager.discover_files(["thing"], True) self.assertEqual([], self.manager.files_list) self.assertEqual(["thing"], 
self.manager.excluded_files) @mock.patch("os.path.isdir") def test_discover_files_exclude_dir(self, isdir): isdir.return_value = False # Test exclude dir using wildcard self.manager.discover_files(["./x/y.py"], True, "./x/*") self.assertEqual([], self.manager.files_list) self.assertEqual(["./x/y.py"], self.manager.excluded_files) # Test exclude dir without wildcard isdir.side_effect = [True, False] self.manager.discover_files(["./x/y.py"], True, "./x/") self.assertEqual([], self.manager.files_list) self.assertEqual(["./x/y.py"], self.manager.excluded_files) # Test exclude dir without wildcard or trailing slash isdir.side_effect = [True, False] self.manager.discover_files(["./x/y.py"], True, "./x") self.assertEqual([], self.manager.files_list) self.assertEqual(["./x/y.py"], self.manager.excluded_files) # Test exclude dir without prefix or suffix isdir.side_effect = [False, False] self.manager.discover_files(["./x/y/z.py"], True, "y") self.assertEqual([], self.manager.files_list) self.assertEqual(["./x/y/z.py"], self.manager.excluded_files) @mock.patch("os.path.isdir") def test_discover_files_exclude_cmdline(self, isdir): isdir.return_value = False with mock.patch.object(manager, "_is_file_included") as m: self.manager.discover_files( ["a", "b", "c"], True, excluded_paths="a,b" ) m.assert_called_with( "c", ["*.py", "*.pyw"], ["a", "b"], enforce_glob=False ) @mock.patch("os.path.isdir") def test_discover_files_exclude_glob(self, isdir): isdir.return_value = False self.manager.discover_files( ["a.py", "test_a.py", "test.py"], True, excluded_paths="test_*.py" ) self.assertEqual(["./a.py", "./test.py"], self.manager.files_list) self.assertEqual(["test_a.py"], self.manager.excluded_files) @mock.patch("os.path.isdir") def test_discover_files_include(self, isdir): isdir.return_value = False with mock.patch.object(manager, "_is_file_included") as m: m.return_value = True self.manager.discover_files(["thing"], True) self.assertEqual(["./thing"], self.manager.files_list) 
self.assertEqual([], self.manager.excluded_files) def test_run_tests_keyboardinterrupt(self): # Test that bandit manager exits when there is a keyboard interrupt temp_directory = self.useFixture(fixtures.TempDir()).path some_file = os.path.join(temp_directory, "some_code_file.py") with open(some_file, "w") as fd: fd.write("some_code = x + 1") self.manager.files_list = [some_file] with mock.patch( "bandit.core.metrics.Metrics.count_issues" ) as mock_count_issues: mock_count_issues.side_effect = KeyboardInterrupt # assert a SystemExit with code 2 self.assertRaisesRegex(SystemExit, "2", self.manager.run_tests) def test_run_tests_ioerror(self): # Test that a file name is skipped and added to the manager.skipped # list when there is an IOError attempting to open/read the file temp_directory = self.useFixture(fixtures.TempDir()).path no_such_file = os.path.join(temp_directory, "no_such_file.py") self.manager.files_list = [no_such_file] self.manager.run_tests() # since the file name and the IOError.strerror text are added to # manager.skipped, we convert skipped to str to find just the file name # since IOError is not constant self.assertIn(no_such_file, str(self.manager.skipped)) def test_compare_baseline(self): issue_a = self._get_issue_instance() issue_a.fname = "file1.py" issue_b = self._get_issue_instance() issue_b.fname = "file2.py" issue_c = self._get_issue_instance(sev=constants.HIGH) issue_c.fname = "file1.py" # issue c is in results, not in baseline self.assertEqual( [issue_c], manager._compare_baseline_results( [issue_a, issue_b], [issue_a, issue_b, issue_c] ), ) # baseline and results are the same self.assertEqual( [], manager._compare_baseline_results( [issue_a, issue_b, issue_c], [issue_a, issue_b, issue_c] ), ) # results are better than baseline self.assertEqual( [], manager._compare_baseline_results( [issue_a, issue_b, issue_c], [issue_a, issue_b] ), ) def test_find_candidate_matches(self): issue_a = self._get_issue_instance() issue_b = 
self._get_issue_instance() issue_c = self._get_issue_instance() issue_c.fname = "file1.py" # issue a and b are the same, both should be returned as candidates self.assertEqual( {issue_a: [issue_a, issue_b]}, manager._find_candidate_matches([issue_a], [issue_a, issue_b]), ) # issue a and c are different, only a should be returned self.assertEqual( {issue_a: [issue_a]}, manager._find_candidate_matches([issue_a], [issue_a, issue_c]), ) # c doesn't match a, empty list should be returned self.assertEqual( {issue_a: []}, manager._find_candidate_matches([issue_a], [issue_c]), ) # a and b match, a and b should both return a and b candidates self.assertEqual( {issue_a: [issue_a, issue_b], issue_b: [issue_a, issue_b]}, manager._find_candidate_matches( [issue_a, issue_b], [issue_a, issue_b, issue_c] ), )
ManagerTests
python
getlogbook__logbook
src/logbook/queues.py
{ "start": 9097, "end": 10565 }
class ____: """Baseclass for all subscribers.""" def recv(self, timeout=None): """Receives a single record from the socket. Timeout of 0 means nonblocking, `None` means blocking and otherwise it's a timeout in seconds after which the function just returns with `None`. Subclasses have to override this. """ raise NotImplementedError() def dispatch_once(self, timeout=None): """Receives one record from the socket, loads it and dispatches it. Returns `True` if something was dispatched or `False` if it timed out. """ rv = self.recv(timeout) if rv is not None: dispatch_record(rv) return True return False def dispatch_forever(self): """Starts a loop that dispatches log records forever.""" while 1: self.dispatch_once() def dispatch_in_background(self, setup=None): """Starts a new daemonized thread that dispatches in the background. An optional handler setup can be provided that pushed to the new thread (can be any :class:`logbook.base.StackedObject`). Returns a :class:`ThreadController` object for shutting down the background thread. The background thread will already be running when this function returns. """ controller = ThreadController(self, setup) controller.start() return controller
SubscriberBase
python
python-pillow__Pillow
src/PIL/ExifTags.py
{ "start": 9126, "end": 9305 }
class ____(IntEnum): InteropIndex = 0x0001 InteropVersion = 0x0002 RelatedImageFileFormat = 0x1000 RelatedImageWidth = 0x1001 RelatedImageHeight = 0x1002
Interop
python
GoogleCloudPlatform__python-docs-samples
endpoints/bookstore-grpc-transcoding/bookstore.py
{ "start": 776, "end": 2271 }
class ____: """An in-memory backend for storing Bookstore data.""" def __init__(self): self._last_shelf_id = 0 self._shelves = dict() self._lock = threading.Lock() def list_shelf(self): with self._lock: return [s._shelf for (_, s) in self._shelves.items()] def create_shelf(self, shelf): with self._lock: self._last_shelf_id += 1 shelf_id = self._last_shelf_id shelf.id = shelf_id self._shelves[shelf_id] = ShelfInfo(shelf) return (shelf, shelf_id) def get_shelf(self, shelf_id): with self._lock: return self._shelves[shelf_id]._shelf def delete_shelf(self, shelf_id): with self._lock: del self._shelves[shelf_id] def list_books(self, shelf_id): with self._lock: return [book for (_, book) in self._shelves[shelf_id]._books.items()] def create_book(self, shelf_id, book): with self._lock: shelf_info = self._shelves[shelf_id] shelf_info._last_book_id += 1 book_id = shelf_info._last_book_id book.id = book_id shelf_info._books[book_id] = book return book def get_book(self, shelf_id, book_id): with self._lock: return self._shelves[shelf_id]._books[book_id] def delete_book(self, shelf_id, book_id): with self._lock: del self._shelves[shelf_id]._books[book_id]
Bookstore
python
Textualize__textual
src/textual/demo/widgets.py
{ "start": 18985, "end": 19599 }
class ____(containers.VerticalGroup): DEFAULT_CLASSES = "column" TABS_MD = """\ ## Tabs A navigable list of section headers. Typically used with `ContentTabs`, to display additional content associate with each tab. Use the cursor keys to navigate. """ DEFAULT_CSS = """ .bio { padding: 1 2; background: $boost; color: $foreground-muted; } """ def compose(self) -> ComposeResult: yield Markdown(self.TABS_MD) with TabbedContent(*[bio["name"] for bio in DUNE_BIOS]): for bio in DUNE_BIOS: yield Static(bio["description"], classes="bio")
TabsDemo
python
tornadoweb__tornado
tornado/test/runtests.py
{ "start": 2934, "end": 3475 }
class ____(logging.Filter): """Counts the number of WARNING or higher log records.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.info_count = self.warning_count = self.error_count = 0 def filter(self, record): if record.levelno >= logging.ERROR: self.error_count += 1 elif record.levelno >= logging.WARNING: self.warning_count += 1 elif record.levelno >= logging.INFO: self.info_count += 1 return True
LogCounter
python
walkccc__LeetCode
solutions/2566. Maximum Difference by Remapping a Digit/2566.py
{ "start": 0, "end": 325 }
class ____: def minMaxDifference(self, num: int) -> int: s = str(num) to9 = s[self._firstNotNineIndex(s)] to0 = s[0] return int(s.replace(to9, '9')) - int(s.replace(to0, '0')) def _firstNotNineIndex(self, s: str) -> int: for i, c in enumerate(s): if c != '9': return i return 0
Solution
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/mssql/json.py
{ "start": 706, "end": 3498 }
class ____(sqltypes.JSON): """MSSQL JSON type. MSSQL supports JSON-formatted data as of SQL Server 2016. The :class:`_mssql.JSON` datatype at the DDL level will represent the datatype as ``NVARCHAR(max)``, but provides for JSON-level comparison functions as well as Python coercion behavior. :class:`_mssql.JSON` is used automatically whenever the base :class:`_types.JSON` datatype is used against a SQL Server backend. .. seealso:: :class:`_types.JSON` - main documentation for the generic cross-platform JSON datatype. The :class:`_mssql.JSON` type supports persistence of JSON values as well as the core index operations provided by :class:`_types.JSON` datatype, by adapting the operations to render the ``JSON_VALUE`` or ``JSON_QUERY`` functions at the database level. The SQL Server :class:`_mssql.JSON` type necessarily makes use of the ``JSON_QUERY`` and ``JSON_VALUE`` functions when querying for elements of a JSON object. These two functions have a major restriction in that they are **mutually exclusive** based on the type of object to be returned. The ``JSON_QUERY`` function **only** returns a JSON dictionary or list, but not an individual string, numeric, or boolean element; the ``JSON_VALUE`` function **only** returns an individual string, numeric, or boolean element. **both functions either return NULL or raise an error if they are not used against the correct expected value**. To handle this awkward requirement, indexed access rules are as follows: 1. When extracting a sub element from a JSON that is itself a JSON dictionary or list, the :meth:`_types.JSON.Comparator.as_json` accessor should be used:: stmt = select(data_table.c.data["some key"].as_json()).where( data_table.c.data["some key"].as_json() == {"sub": "structure"} ) 2. 
When extracting a sub element from a JSON that is a plain boolean, string, integer, or float, use the appropriate method among :meth:`_types.JSON.Comparator.as_boolean`, :meth:`_types.JSON.Comparator.as_string`, :meth:`_types.JSON.Comparator.as_integer`, :meth:`_types.JSON.Comparator.as_float`:: stmt = select(data_table.c.data["some key"].as_string()).where( data_table.c.data["some key"].as_string() == "some string" ) .. versionadded:: 1.4 """ # note there was a result processor here that was looking for "number", # but none of the tests seem to exercise it. # Note: these objects currently match exactly those of MySQL, however since # these are not generalizable to all JSON implementations, remain separately # implemented for each dialect.
JSON
python
huggingface__transformers
src/transformers/generation/continuous_batching/requests.py
{ "start": 3721, "end": 9763 }
class ____: """Tracks the state of a generation request through its lifecycle. Attributes: request_id (str): The ID of the generation request. full_prompt_ids (list[int] | None): The tokens IDs of the full prompt. prompt_ids (list[int] | None): The tokens IDs currently being processed. remaining_prompt_ids (list[int]): The tokens IDs remaining to be processed (for split requests). static_outputs (list[int]): The generated tokens. allocated_blocks (int): The number of blocks allocated to the request. position_offset (int): The current position in the sequence for position_ids. status (RequestStatus): The status of the request: can be one of PENDING, PREFILLING, PREFILLING_SPLIT, SPLIT_PENDING_REMAINDER, DECODING, FINISHED, FAILED max_new_tokens (int): The maximum number of new tokens to generate. eos_token_id (int): The ID of the end-of-sequence token. streaming (bool): Whether to stream tokens as they're generated created_time (float): The time the request was created. error (Optional[str]): Any error message associated with the request. When None, has had no error yet. 
""" # Required fields request_id: str initial_tokens: list[int] # Initial prompt tokens # Optional fields record_timestamps: bool = False # Whether to record timestamps for the generated tokens # Internal fields tokens_to_process: list[int] | None = None # Tokens IDs currently being processed remaining_prefill_tokens: list[int] = field(default_factory=list) # For split requests, prefill left to process generated_tokens: list[int] = field(default_factory=list) # Generated tokens allocated_blocks: int = 0 # Number of blocks allocated to the request position_offset: int = 0 # Current position in the sequence for position_ids _status: RequestStatus = RequestStatus.PENDING # Status of the request, hidden behind a property max_new_tokens: int = 20 # Maximum number of new tokens to generate eos_token_id: int = -1 # ID of the end-of-sequence token streaming: bool = False # Whether to stream tokens as they're generated created_time: float = field(default_factory=time.perf_counter) # Time the request was created error: str | None = None # Error message if the request failed lifespan: tuple[float, float] = (-1, -1) # (time request was no longer pending, time request finished) _timestamps: list[float] = field(default_factory=list) # Timestamps of the generated tokens @property def status(self) -> RequestStatus: return self._status @status.setter def status(self, value: RequestStatus): if self._status == RequestStatus.PENDING: self.lifespan = (time.perf_counter(), -1) elif value == RequestStatus.FINISHED: self.lifespan = (self.lifespan[0], time.perf_counter()) self.log_end_of_request() self._status = value @property def timestamps(self) -> list[float] | None: return self._timestamps if self.record_timestamps else None def log_end_of_request(self): prefill_len = len(self.initial_tokens) decode_len = self.generated_len() start_time = self.lifespan[0] - self.created_time end_time = self.lifespan[1] - self.created_time logger.info( f"Request {self.request_id} finished: {prefill_len 
= } {decode_len = } {start_time = } {end_time = }" ) def current_len(self) -> int: """Get the current length of the sequence (prompt + generated tokens).""" return self.position_offset def generated_len(self) -> int: """Get the number of tokens generated so far.""" return len(self.generated_tokens) # TODO: this logic seems one token off, check it out @traced def update_and_check_completion(self, token_id: int) -> bool: """Update the request with a newly generated token and check for completion. Args: token_id: The token ID to add to the output sequence Returns: bool: True if the request is now complete, False otherwise """ # Only update if we're in decoding state if self.status != RequestStatus.DECODING: return False # If we're recording timestamps, add timestamp to the list if self.record_timestamps: self._timestamps.append(time.perf_counter()) is_eos = token_id == self.eos_token_id and self.eos_token_id != -1 is_max_len = self.generated_len() >= self.max_new_tokens # Only add the token if we're not finishing due to max length # (EOS tokens should still be added to the output) if not (is_max_len and not is_eos): self.generated_tokens.extend([token_id]) if is_eos or is_max_len: self.status = RequestStatus.FINISHED return True return False def __repr__(self): msg = [ f"request_id={self.request_id}", f"status={self._status}", f"out_tokens={self.generated_len()}", f"query_length={len(self.tokens_to_process)}", f"remaining_tokens={len(self.remaining_prefill_tokens)}", f"kv_length={self.position_offset}", f"full_prompt_length={len(self.initial_tokens)}", f"allocated_blocks={self.allocated_blocks}", f"generated_tokens={self.generated_tokens}", ] return "RequestState(\n\t" + ",\n\t".join(msg) + "\n)" def to_generation_output(self): """Convert the request state to a GenerationOutput object.""" return GenerationOutput( request_id=self.request_id, prompt_ids=self.initial_tokens, status=self.status, generated_tokens=self.generated_tokens, logprobs=[], error=self.error, 
timestamps=self.timestamps, )
RequestState
python
dagster-io__dagster
python_modules/dagster/dagster_tests/components_tests/integration_tests/lib/duckdb_component/step_four.py
{ "start": 210, "end": 310 }
class ____(BaseModel): sql_file: Optional[str] asset_key: Optional[str]
DuckDbScaffolderParams
python
pytorch__pytorch
torch/_numpy/_dtypes.py
{ "start": 937, "end": 996 }
class ____(integer): name = "signedinteger"
signedinteger
python
getsentry__sentry
src/sentry/utils/query.py
{ "start": 8293, "end": 9103 }
class ____[V](RangeQuerySetWrapperWithProgressBar[V]): """ Works the same as `RangeQuerySetWrapperWithProgressBar`, but approximates the number of rows in the table. This is intended for use on very large tables where we end up timing out attempting to get an accurate count. Note: This is only intended for queries that are iterating over an entire table. Will not produce a useful total count on filtered queries. """ def get_total_count(self) -> int: cursor = connections[self.queryset.db].cursor() cursor.execute( "SELECT CAST(GREATEST(reltuples, 0) AS BIGINT) AS estimate FROM pg_class WHERE relname = %s", (self.queryset.model._meta.db_table,), ) return cursor.fetchone()[0]
RangeQuerySetWrapperWithProgressBarApprox
python
google__jax
tests/scaled_matmul_stablehlo_test.py
{ "start": 9083, "end": 14896 }
class ____(jtu.JaxTestCase): def setUp(self): super().setUp() try: check_cudnn_version() except RuntimeError as e: self.skipTest(str(e)) if not jtu.is_cuda_compute_capability_at_least("10.0"): self.skipTest("Requires at least Blackwell arch") mxfp8_configs = create_mxfp8_configs() @jtu.sample_product( in_shardings=sharding_configs, block_scale_configs=[mxfp8_configs,], ) @jtu.run_on_devices("cuda") def test_collectives(self, in_shardings, block_scale_configs): if jtu.device_under_test() != "gpu" or len(jax.local_devices()) < 4: self.skipTest("Partition Test enabled for at least 4 GPUs") expected_hlo = sharding_configs[in_shardings][0] hlo_text = get_hlo_text(in_shardings, block_scale_configs) hlo_pattern = re.compile( r".*".join([re.escape(x) for x in expected_hlo]), flags=re.DOTALL ) self.assertRegex( hlo_text, hlo_pattern, msg=f"Failed to find pattern: {expected_hlo}" ) @jtu.sample_product( contract=[160, 96], lhs_non_contract=[240, 100], dtype=[jnp.float32, jnp.bfloat16, jnp.float16], ) @jtu.run_on_devices("cuda") def test_scaled_matmul_nvfp4( self, contract, lhs_non_contract, dtype, ): batch, rhs_non_contract = 2, 128 dot_config = ( (batch, lhs_non_contract, contract), (batch, rhs_non_contract, contract), (([2], [2]), ([0], [0])) ) _, (a_dq, b_dq), (a_q, b_q, a_s, b_s), block_scale_configs = ( generate_nvfp4_quantized_tensors(dot_config, dtype) ) a_gs = block_scale_configs[0].global_scale b_gs = block_scale_configs[1].global_scale def wrapper(lhs, rhs, lhs_scales, rhs_scales, out_type): out = scaled_matmul_wrapper( lhs, rhs, lhs_scales, rhs_scales, preferred_element_type=jnp.float32, ) gs = a_gs * b_gs return (out * gs).astype(out_type) j_scaled_matmul = jax.jit(partial(wrapper, out_type=dtype)) hlo_text = ( j_scaled_matmul.lower(a_q, b_q, a_s, b_s) .compile() .as_text() ) hlo_pattern = re.compile( r".*".join([re.escape(x) for x in ("custom-call", c_name)]) ) self.assertRegex(hlo_text, hlo_pattern) out = j_scaled_matmul(a_q, b_q, a_s, b_s) out_ref = jnp.einsum( 
"BMK,BNK->BMN", a_dq, b_dq ) self.assertArraysAllClose( out, out_ref.astype(dtype), rtol=1e-2, atol=5e-2 ) @jtu.sample_product( contract=[160, 96], lhs_non_contract=[240, 100], dtype=[jnp.float16, jnp.bfloat16, jnp.float32], block_scale_configs=[mxfp8_configs,], ) @jtu.run_on_devices("cuda") def test_scaled_matmul( self, contract, lhs_non_contract, dtype, block_scale_configs, ): batch, rhs_non_contract = 2, 128 a, b, a_q, b_q, a_scales, b_scales = generate_quantized_tensors( batch, lhs_non_contract, contract, rhs_non_contract, block_scale_configs, dtype=dtype, ) def wrapper(lhs, rhs, lhs_scales, rhs_scales, out_type): return scaled_matmul_wrapper( lhs, rhs, lhs_scales, rhs_scales, preferred_element_type=out_type, ) j_scaled_matmul = jax.jit(partial(wrapper, out_type=dtype)) hlo_text = ( j_scaled_matmul.lower(a_q, b_q, a_scales, b_scales) .compile() .as_text() ) hlo_pattern = re.compile( r".*".join([re.escape(x) for x in ("custom-call", c_name)]) ) self.assertRegex(hlo_text, hlo_pattern) out = j_scaled_matmul(a_q, b_q, a_scales, b_scales) out_ref = np.einsum( "BMK,BNK->BMN", a.astype(jnp.float32), b.astype(jnp.float32) ) self.assertArraysAllClose( out, out_ref.astype(dtype), rtol=1e-3, atol=1e-3 ) @jtu.sample_product( in_shardings=sharding_configs, block_scale_configs=[mxfp8_configs,], ) @jtu.run_on_devices("cuda") def test_scaled_matmul_sharded(self, in_shardings, block_scale_configs): if len(jax.local_devices()) < 4: self.skipTest("Require at least 4 devices to run sharding tests.") batch, contract, non_contract = 2, 1024, 256 a, b, a_q, b_q, a_scales, b_scales = generate_quantized_tensors( batch, non_contract, contract, non_contract, block_scale_configs, ) devices = np.array(jax.local_devices()[:4]) devices = devices.reshape((2, 2)) expected_output_spec = sharding_configs[in_shardings][1] with Mesh(devices, ("dp", "tp")) as mesh: a_q, b_q, a_scales, b_scales, input_shardings = ( shard_and_device_put( mesh, in_shardings[0], in_shardings[1], a_q, b_q, a_scales, 
b_scales, ) ) args = [a_q, b_q, a_scales, b_scales] j_scaled_matmul = jax.jit( scaled_matmul_wrapper, in_shardings=input_shardings ) hlo_compiled = j_scaled_matmul.lower(*args).compile() hlo_pattern = re.compile( r".*".join([re.escape(x) for x in ("custom-call", c_name)]) ) self.assertRegex(hlo_compiled.as_text(), hlo_pattern) j_ref = jax.jit( partial( jax.lax.dot_general, dimension_numbers=(([2], [2]), ([0], [0])), ), in_shardings=input_shardings[:2], ) out = j_scaled_matmul(*args) out_ref = j_ref(a, b) expected_output_sharding = NamedSharding( mesh=mesh, spec=expected_output_spec ) self.assertArraysAllClose(out, out_ref, rtol=1e-3, atol=1e-3) self.assertTrue( out.sharding.is_equivalent_to(expected_output_sharding, out.ndim) ) @jtu.with_config(jax_numpy_dtype_promotion="standard")
ScaledMatmulTest
python
getsentry__sentry
src/sentry/users/web/account_identity.py
{ "start": 553, "end": 1576 }
class ____(ControlSiloOrganizationView): @method_decorator(never_cache) def handle( self, request: HttpRequest, organization: Organization, provider_key: str, external_id: str ) -> HttpResponseBase: try: provider_model = IdentityProvider.objects.get( type=provider_key, external_id=external_id ) except IdentityProvider.DoesNotExist: return self.redirect(reverse("sentry-account-settings-identities")) pipeline = IdentityPipeline( organization=organization, provider_key=provider_key, provider_model=provider_model, request=request, ) if request.method != "POST" and not pipeline.is_valid(): context = {"provider": pipeline.provider, "organization": organization} return render_to_response("sentry/auth-link-identity.html", context, request) pipeline.initialize() return pipeline.current_step()
AccountIdentityAssociateView
python
python-markdown__markdown
markdown/inlinepatterns.py
{ "start": 11447, "end": 13072 }
class ____(Pattern): """ Base class that inline processors subclass. This is the newer style inline processor that uses a more efficient and flexible search approach. """ def __init__(self, pattern: str, md: Markdown | None = None): """ Create an instant of an inline processor. Arguments: pattern: A regular expression that matches a pattern. md: An optional pointer to the instance of `markdown.Markdown` and is available as `self.md` on the class instance. """ self.pattern = pattern self.compiled_re = re.compile(pattern, re.DOTALL | re.UNICODE) # API for Markdown to pass `safe_mode` into instance self.safe_mode = False self.md = md def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | str | None, int | None, int | None]: """Return a ElementTree element from the given match and the start and end index of the matched text. If `start` and/or `end` are returned as `None`, it will be assumed that the processor did not find a valid region of text. Subclasses should override this method. Arguments: m: A re match object containing a match of the pattern. data: The buffer currently under analysis. Returns: el: The ElementTree element, text or None. start: The start of the region that has been matched or None. end: The end of the region that has been matched or None. """ pass # pragma: no cover
InlineProcessor
python
getsentry__sentry
tests/sentry_plugins/github/test_provider.py
{ "start": 757, "end": 6300 }
class ____(TestCase): @cached_property def provider(self) -> GitHubRepositoryProvider: return GitHubRepositoryProvider("github") def test_compare_commits(self) -> None: repo = Repository.objects.create(provider="github", name="example", organization_id=1) res = self.provider._format_commits(repo, orjson.loads(COMPARE_COMMITS_EXAMPLE)["commits"]) assert res == [ { "author_email": "support@github.com", "author_name": "Monalisa Octocat", "message": "Fix all the bugs", "id": "6dcb09b5b57875f334f61aebed695e2e4193db5e", "repository": "example", } ] def test_get_last_commits(self) -> None: repo = Repository.objects.create(provider="github", name="example", organization_id=1) res = self.provider._format_commits(repo, orjson.loads(GET_LAST_COMMITS_EXAMPLE)[:10]) assert res == [ { "author_email": "support@github.com", "author_name": "Monalisa Octocat", "message": "Fix all the bugs", "id": "6dcb09b5b57875f334f61aebed695e2e4193db5e", "repository": "example", } ] @responses.activate def test_create_repository(self) -> None: responses.add( responses.POST, "https://api.github.com/repos/getsentry/example-repo/hooks", json={"id": "123456", "events": ["push", "pull_request"]}, ) user = self.create_user() organization = self.create_organization() self.create_usersocialauth( user=user, provider="github", extra_data={"access_token": "abcdefg"} ) data = {"name": "getsentry/example-repo", "external_id": "654321"} data = self.provider.create_repository(organization, data, user) assert data == { "config": { "name": "getsentry/example-repo", "webhook_id": "123456", "webhook_events": ["push", "pull_request"], }, "external_id": "654321", "name": "getsentry/example-repo", "url": "https://github.com/getsentry/example-repo", } request = responses.calls[-1].request req_json = orjson.loads(request.body) assert req_json == { "active": True, "config": { "url": f"http://testserver/plugins/github/organizations/{organization.id}/webhook/", "secret": self.provider.get_webhook_secret(organization), 
"content_type": "json", }, "name": "web", "events": ["push", "pull_request"], } @responses.activate def test_delete_repository(self) -> None: responses.add( responses.DELETE, "https://api.github.com/repos/getsentry/example-repo/hooks/123456", json={}, ) user = self.create_user() organization = self.create_organization() self.create_usersocialauth( user=user, provider="github", extra_data={"access_token": "abcdefg"} ) repo = Repository.objects.create( provider="github", name="example-repo", organization_id=organization.id, config={ "name": "getsentry/example-repo", "external_id": "654321", "webhook_id": "123456", }, ) self.provider.delete_repository(repo, user) @responses.activate def test_update_repository_without_webhook(self) -> None: responses.add( responses.POST, "https://api.github.com/repos/getsentry/example-repo/hooks", json={"id": "123456", "events": ["push", "pull_request"]}, ) user = self.create_user() organization = self.create_organization() self.create_usersocialauth( user=user, provider="github", extra_data={"access_token": "abcdefg"} ) repo = Repository.objects.create( provider="github", name="example-repo", organization_id=organization.id, config={"name": "getsentry/example-repo", "external_id": "654321"}, ) self.provider.update_repository(repo, user) assert repo.config["webhook_id"] == "123456" assert repo.config["webhook_events"] == ["push", "pull_request"] @responses.activate def test_update_repository_with_webhook(self) -> None: responses.add( responses.PATCH, "https://api.github.com/repos/getsentry/example-repo/hooks/123456", json={"id": "123456", "events": ["push", "pull_request"]}, ) user = self.create_user() organization = self.create_organization() self.create_usersocialauth( user=user, provider="github", extra_data={"access_token": "abcdefg"} ) repo = Repository.objects.create( provider="github", name="example-repo", organization_id=organization.id, config={ "name": "getsentry/example-repo", "external_id": "654321", "webhook_id": "123456", 
}, ) self.provider.update_repository(repo, user) assert repo.config["webhook_id"] == "123456" assert repo.config["webhook_events"] == ["push", "pull_request"]
GitHubPluginTest
python
run-llama__llama_index
llama-index-core/llama_index/core/indices/property_graph/sub_retrievers/llm_synonym.py
{ "start": 949, "end": 5159 }
class ____(BasePGRetriever): """ A retriever that uses a language model to expand a query with synonyms. The synonyms are then used to retrieve nodes from a property graph. Args: graph_store (PropertyGraphStore): The graph store to retrieve data from. include_text (bool, optional): Whether to include source text in the retrieved nodes. Defaults to True. synonym_prompt (Union[BasePromptTemplate, str], optional): The template to use for the synonym expansion query. Defaults to DEFAULT_SYNONYM_EXPAND_TEMPLATE. max_keywords (int, optional): The maximum number of synonyms to generate. Defaults to 10. path_depth (int, optional): The depth of the path to retrieve for each node. Defaults to 1 (i.e. a triple). output_parsing_fn (Optional[callable], optional): A callable function to parse the output of the language model. Defaults to None. llm (Optional[LLM], optional): The language model to use. Defaults to Settings.llm. """ def __init__( self, graph_store: PropertyGraphStore, include_text: bool = True, include_properties: bool = False, synonym_prompt: Union[ BasePromptTemplate, str ] = DEFAULT_SYNONYM_EXPAND_TEMPLATE, max_keywords: int = 10, path_depth: int = 1, limit: int = 30, output_parsing_fn: Optional[Callable] = None, llm: Optional[LLM] = None, **kwargs: Any, ) -> None: self._llm = llm or Settings.llm if isinstance(synonym_prompt, str): synonym_prompt = PromptTemplate(synonym_prompt) self._synonym_prompt = synonym_prompt self._output_parsing_fn = output_parsing_fn self._max_keywords = max_keywords self._path_depth = path_depth self._limit = limit super().__init__( graph_store=graph_store, include_text=include_text, include_properties=include_properties, **kwargs, ) def _parse_llm_output(self, output: str) -> List[str]: if self._output_parsing_fn: matches = self._output_parsing_fn(output) else: matches = output.strip().split("^") # capitalize to normalize with ingestion return [x.strip().capitalize() for x in matches if x.strip()] def _prepare_matches( self, matches: 
List[str], limit: Optional[int] = None ) -> List[NodeWithScore]: kg_nodes = self._graph_store.get(ids=matches) triplets = self._graph_store.get_rel_map( kg_nodes, depth=self._path_depth, limit=limit or self._limit, ignore_rels=[KG_SOURCE_REL], ) return self._get_nodes_with_score(triplets) async def _aprepare_matches( self, matches: List[str], limit: Optional[int] = None ) -> List[NodeWithScore]: kg_nodes = await self._graph_store.aget(ids=matches) triplets = await self._graph_store.aget_rel_map( kg_nodes, depth=self._path_depth, limit=limit or self._limit, ignore_rels=[KG_SOURCE_REL], ) return self._get_nodes_with_score(triplets) def retrieve_from_graph( self, query_bundle: QueryBundle, limit: Optional[int] = None ) -> List[NodeWithScore]: response = self._llm.predict( self._synonym_prompt, query_str=query_bundle.query_str, max_keywords=self._max_keywords, ) matches = self._parse_llm_output(response) return self._prepare_matches(matches, limit=limit or self._limit) async def aretrieve_from_graph( self, query_bundle: QueryBundle, limit: Optional[int] = None ) -> List[NodeWithScore]: response = await self._llm.apredict( self._synonym_prompt, query_str=query_bundle.query_str, max_keywords=self._max_keywords, ) matches = self._parse_llm_output(response) return await self._aprepare_matches(matches, limit=limit or self._limit)
LLMSynonymRetriever
python
getsentry__sentry
tests/sentry/releases/endpoints/test_organization_release_meta.py
{ "start": 584, "end": 8156 }
class ____(APITestCase): def test_multiple_projects(self) -> None: user = self.create_user(is_staff=False, is_superuser=False) org = self.organization org.flags.allow_joinleave = False org.save() team1 = self.create_team(organization=org) team2 = self.create_team(organization=org) project = self.create_project(teams=[team1], organization=org) project2 = self.create_project(teams=[team2], organization=org) release = Release.objects.create(organization_id=org.id, version="abcabcabc") release.add_project(project) release.add_project(project2) ReleaseProject.objects.filter(project=project, release=release).update(new_groups=10) ReleaseProject.objects.filter(project=project2, release=release).update(new_groups=10) ReleaseFile.objects.create( organization_id=project.organization_id, release_id=release.id, file=File.objects.create(name="application.js", type="release.file"), name="http://example.com/application.js", ) repo = Repository.objects.create(organization_id=project.organization_id, name=project.name) commit = Commit.objects.create( organization_id=project.organization_id, repository_id=repo.id, key="a" * 40 ) commit2 = Commit.objects.create( organization_id=project.organization_id, repository_id=repo.id, key="b" * 40 ) ReleaseCommit.objects.create( organization_id=project.organization_id, release=release, commit=commit, order=1 ) ReleaseCommit.objects.create( organization_id=project.organization_id, release=release, commit=commit2, order=0 ) CommitFileChange.objects.create( organization_id=project.organization_id, commit_id=commit.id, filename=".gitignore", type="M", ) CommitFileChange.objects.create( organization_id=project.organization_id, commit_id=commit2.id, filename="/static/js/widget.js", type="A", ) release.commit_count = 2 release.total_deploys = 1 release.save() self.create_member(teams=[team1, team2], user=user, organization=org) self.login_as(user=user) url = reverse( "sentry-api-0-organization-release-meta", kwargs={"organization_id_or_slug": 
org.slug, "version": release.version}, ) response = self.client.get(url) assert response.status_code == 200, response.content data = orjson.loads(response.content) assert data["deployCount"] == 1 assert data["commitCount"] == 2 assert data["newGroups"] == 20 assert data["commitFilesChanged"] == 2 assert data["releaseFileCount"] == 1 assert len(data["projects"]) == 2 def test_artifact_count_without_weak_association(self) -> None: user = self.create_user(is_staff=False, is_superuser=False) org = self.organization org.flags.allow_joinleave = False org.save() team1 = self.create_team(organization=org) project = self.create_project(teams=[team1], organization=org) release = Release.objects.create(organization_id=org.id, version="abcabcabc") release.add_project(project) self.create_release_archive(release=release.version) self.create_member(teams=[team1], user=user, organization=org) self.login_as(user=user) url = reverse( "sentry-api-0-organization-release-meta", kwargs={"organization_id_or_slug": org.slug, "version": release.version}, ) response = self.client.get(url) assert response.status_code == 200, response.content data = orjson.loads(response.content) assert data["releaseFileCount"] == 2 assert not data["isArtifactBundle"] def test_artifact_count_with_single_weak_association(self) -> None: user = self.create_user(is_staff=False, is_superuser=False) org = self.organization org.flags.allow_joinleave = False org.save() team1 = self.create_team(organization=org) project = self.create_project(teams=[team1], organization=org) release = Release.objects.create(organization_id=org.id, version="abcabcabc") release.add_project(project) self.create_release_archive(release=release.version) self.create_member(teams=[team1], user=user, organization=org) self.login_as(user=user) bundle = self.create_artifact_bundle(org=org, artifact_count=10) ProjectArtifactBundle.objects.create( organization_id=org.id, project_id=project.id, artifact_bundle=bundle ) 
ReleaseArtifactBundle.objects.create( organization_id=org.id, release_name=release.version, artifact_bundle=bundle ) url = reverse( "sentry-api-0-organization-release-meta", kwargs={"organization_id_or_slug": org.slug, "version": release.version}, ) response = self.client.get(url) assert response.status_code == 200, response.content data = orjson.loads(response.content) assert data["releaseFileCount"] == 10 assert data["isArtifactBundle"] def test_artifact_count_with_multiple_weak_association(self) -> None: user = self.create_user(is_staff=False, is_superuser=False) org = self.organization org.flags.allow_joinleave = False org.save() team1 = self.create_team(organization=org) project = self.create_project(teams=[team1], organization=org) release = Release.objects.create(organization_id=org.id, version="abcabcabc") release.add_project(project) self.create_release_archive(release=release.version) self.create_member(teams=[team1], user=user, organization=org) self.login_as(user=user) bundle_1 = self.create_artifact_bundle(org=org, artifact_count=10) ProjectArtifactBundle.objects.create( organization_id=org.id, project_id=project.id, artifact_bundle=bundle_1 ) ReleaseArtifactBundle.objects.create( organization_id=org.id, release_name=release.version, artifact_bundle=bundle_1 ) bundle_2 = self.create_artifact_bundle(org=org, artifact_count=30) ProjectArtifactBundle.objects.create( organization_id=org.id, project_id=project.id, artifact_bundle=bundle_2 ) ReleaseArtifactBundle.objects.create( organization_id=org.id, release_name=release.version, artifact_bundle=bundle_2 ) # We create a bundle associated with the same release but not connected to the project to make sure it is not # counted. 
bundle_3 = self.create_artifact_bundle(org=org, artifact_count=50) ReleaseArtifactBundle.objects.create( organization_id=org.id, release_name=release.version, artifact_bundle=bundle_3 ) url = reverse( "sentry-api-0-organization-release-meta", kwargs={"organization_id_or_slug": org.slug, "version": release.version}, ) response = self.client.get(url) assert response.status_code == 200, response.content data = orjson.loads(response.content) assert data["releaseFileCount"] == 40 assert data["isArtifactBundle"]
ReleaseMetaTest
python
Lightning-AI__lightning
examples/fabric/tensor_parallel/model.py
{ "start": 8624, "end": 10146 }
class ____(nn.Module): """FeedForward module. Args: dim (int): Input dimension. hidden_dim (int): Hidden dimension of the feedforward layer. multiple_of (int): Value to ensure hidden dimension is a multiple of this value. ffn_dim_multiplier (Optional[float]): Custom multiplier for hidden dimension. Defaults to None. Attributes: w1 (Linear): Linear transformation for the first layer. w2 (Linear): Linear transformation for the second layer. w3 (Linear): Linear transformation for the third layer. """ def __init__( self, dim: int, hidden_dim: int, multiple_of: int, ffn_dim_multiplier: Optional[float], ): super().__init__() hidden_dim = int(2 * hidden_dim / 3) # custom dim factor multiplier if ffn_dim_multiplier is not None: hidden_dim = int(ffn_dim_multiplier * hidden_dim) hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) self.w1 = nn.Linear(dim, hidden_dim, bias=False) self.w2 = nn.Linear(hidden_dim, dim, bias=False) self.w3 = nn.Linear(dim, hidden_dim, bias=False) def forward(self, x): return self.w2(F.silu(self.w1(x)) * self.w3(x)) def init_weights(self, init_std: float): nn.init.trunc_normal_(self.w1.weight, mean=0.0, std=0.02) for linear in (self.w2, self.w3): nn.init.trunc_normal_(linear.weight, mean=0.0, std=init_std)
FeedForward
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/mro2.py
{ "start": 418, "end": 651 }
class ____(B, C): pass a = A() a.foo("hello") b = B() b.foo(3) c = C() c.foo(a) d = D() d.foo(3) # This should generate an error because # the bar method from class C should be # selected before the bar method from A. d.bar()
D
python
fluentpython__example-code
10-seq-hacking/vector_v2.py
{ "start": 2227, "end": 3634 }
class ____: typecode = 'd' def __init__(self, components): self._components = array(self.typecode, components) def __iter__(self): return iter(self._components) def __repr__(self): components = reprlib.repr(self._components) components = components[components.find('['):-1] return 'Vector({})'.format(components) def __str__(self): return str(tuple(self)) def __bytes__(self): return (bytes([ord(self.typecode)]) + bytes(self._components)) def __eq__(self, other): return tuple(self) == tuple(other) def __abs__(self): return math.sqrt(sum(x * x for x in self)) def __bool__(self): return bool(abs(self)) # BEGIN VECTOR_V2 def __len__(self): return len(self._components) def __getitem__(self, index): cls = type(self) # <1> if isinstance(index, slice): # <2> return cls(self._components[index]) # <3> elif isinstance(index, numbers.Integral): # <4> return self._components[index] # <5> else: msg = '{cls.__name__} indices must be integers' raise TypeError(msg.format(cls=cls)) # <6> # END VECTOR_V2 @classmethod def frombytes(cls, octets): typecode = chr(octets[0]) memv = memoryview(octets[1:]).cast(typecode) return cls(memv)
Vector
python
openai__openai-python
src/openai/resources/fine_tuning/alpha/alpha.py
{ "start": 450, "end": 1339 }
class ____(SyncAPIResource): @cached_property def graders(self) -> Graders: return Graders(self._client) @cached_property def with_raw_response(self) -> AlphaWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers """ return AlphaWithRawResponse(self) @cached_property def with_streaming_response(self) -> AlphaWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/openai/openai-python#with_streaming_response """ return AlphaWithStreamingResponse(self)
Alpha
python
great-expectations__great_expectations
great_expectations/expectations/metrics/column_map_metrics/column_values_not_match_regex_list.py
{ "start": 545, "end": 2194 }
class ____(ColumnMapMetricProvider):
    """Map metric condition: a row passes when the column value matches NONE
    of the regexes in ``regex_list`` (each engine negates per-regex matches
    and combines them)."""

    condition_metric_name = "column_values.not_match_regex_list"
    condition_value_keys = ("regex_list",)

    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, regex_list, **kwargs):
        # Build one boolean Series per regex, then require that no regex
        # matched (negated any() across the concatenated columns).
        regex_matches = []
        for regex in regex_list:
            regex_matches.append(column.astype(str).str.contains(regex))
        regex_match_df = pd.concat(regex_matches, axis=1, ignore_index=True)
        return ~regex_match_df.any(axis="columns")

    @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    def _sqlalchemy(cls, column, regex_list, _dialect, **kwargs):
        if len(regex_list) == 0:
            raise ValueError("At least one regex must be supplied in the regex_list.")  # noqa: TRY003 # FIXME CoP

        # Capability probe: only the FIRST regex is used to check dialect
        # support. NOTE(review): this assumes the remaining regexes are also
        # supported by the dialect — confirm, or a None could leak into the
        # sa.and_ below.
        regex_expression = get_dialect_regex_expression(
            column, regex_list[0], _dialect, positive=False
        )
        if regex_expression is None:
            logger.warning(f"Regex is not supported for dialect {_dialect!s}")
            raise NotImplementedError

        # AND together the negated match expressions for every regex.
        return sa.and_(
            *(
                get_dialect_regex_expression(column, regex, _dialect, positive=False)
                for regex in regex_list
            )
        )

    @column_condition_partial(engine=SparkDFExecutionEngine)
    def _spark(cls, column, regex_list, **kwargs):
        # Fold the negated rlike conditions together with &.
        # NOTE(review): unlike the SQLAlchemy path, an empty regex_list
        # returns None here rather than raising — verify callers tolerate it.
        compound = None
        for regex in regex_list:
            if compound is None:
                compound = ~column.rlike(regex)
            else:
                compound = compound & ~column.rlike(regex)
        return compound
ColumnValuesNotMatchRegexList
python
bokeh__bokeh
src/bokeh/models/widgets/tables.py
{ "start": 8487, "end": 8887 }
class ____(CellFormatter): ''' Boolean (check mark) cell formatter. ''' # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) icon = Enum('check', 'check-circle', 'check-circle-o', 'check-square', 'check-square-o', help=""" The icon visualizing the check mark. """)
BooleanFormatter
python
huggingface__transformers
src/transformers/models/evolla/modular_evolla.py
{ "start": 2279, "end": 4166 }
class ____(nn.Module): """ Rotary position embeddings based on those in [RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by rotation matrices which depend on their relative positions. """ inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, dim: int): super().__init__() # Generate and save the inverse frequency buffer (non trainable) inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim)) self.register_buffer("inv_freq", inv_freq) self._seq_len_cached = None self._cos_cached = None self._sin_cached = None def _update_cos_sin_tables(self, x, seq_dimension=2): seq_len = x.shape[seq_dimension] # Reset the tables if the sequence length has changed, # or if we're on a new device (possibly due to tracing for instance) if seq_len != self._seq_len_cached or self._cos_cached.device != x.device: self._seq_len_cached = seq_len t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(self.inv_freq) freqs = torch.outer(t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1).to(x.device) self._cos_cached = emb.cos()[None, None, :, :] self._sin_cached = emb.sin()[None, None, :, :] return self._cos_cached, self._sin_cached def forward(self, q: torch.Tensor, k: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: self._cos_cached, self._sin_cached = self._update_cos_sin_tables(k, seq_dimension=-2) return ( apply_rotary_pos_emb_esm(q, self._cos_cached, self._sin_cached).to(dtype=q.dtype), apply_rotary_pos_emb_esm(k, self._cos_cached, self._sin_cached).to(dtype=k.dtype), )
EvollaSaProtRotaryEmbedding
python
euske__pdfminer
pdfminer/pdfdocument.py
{ "start": 1033, "end": 1078 }
class ____(PDFException):
    """Marker exception subclass of PDFException.

    Carries no extra state; per its name it presumably signals that the
    document has no outlines (bookmarks) — TODO confirm against callers.
    """
    pass
PDFNoOutlines
python
facebook__pyre-check
client/frontend_configuration.py
{ "start": 1885, "end": 7494 }
class ____(abc.ABC):
    """Abstract read-only view of a Pyre frontend configuration.

    Subclasses implement the abstract accessors; the concrete helpers at the
    bottom derive composite values (local root, log directory, effective
    search paths) purely from those accessors.
    """

    @abc.abstractmethod
    def get_dot_pyre_directory(self) -> Path:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_server_start_command(
        self, download_if_needed: bool = False
    ) -> Optional[ServerStartCommand]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_typeshed_location(self, download_if_needed: bool = False) -> Optional[Path]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_binary_version(self) -> Optional[str]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_content_for_display(self) -> str:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_global_root(self) -> Path:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_relative_local_root(self) -> Optional[str]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_excludes(self) -> List[str]:
        raise NotImplementedError()

    @abc.abstractmethod
    def is_strict(self) -> bool:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_remote_logger(self) -> Optional[str]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_number_of_workers(self) -> int:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_python_version(self) -> configuration_module.PythonVersion:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_system_platform(self) -> Optional[str]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_shared_memory(self) -> configuration_module.SharedMemory:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_valid_extension_suffixes(self) -> List[str]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_ignore_all_errors(self) -> List[str]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_only_check_paths(self) -> List[str]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_existent_user_specified_search_paths(
        self,
    ) -> List[configuration_module.search_path.Element]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_existent_source_directories(
        self,
    ) -> List[configuration_module.search_path.Element]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_existent_unwatched_dependency(
        self,
    ) -> Optional[configuration_module.unwatched.UnwatchedDependency]:
        raise NotImplementedError()

    @abc.abstractmethod
    def is_source_directories_defined(
        self,
    ) -> bool:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_buck_targets(
        self,
    ) -> Optional[List[str]]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_buck_mode(self) -> Optional[str]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_buck_isolation_prefix(self) -> Optional[str]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_buck_bxl_builder(self) -> Optional[str]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_other_critical_files(self) -> List[str]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_taint_models_path(self) -> List[str]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_project_identifier(self) -> str:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_enable_readonly_analysis(self) -> Optional[bool]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_enable_strict_override_check(self) -> Optional[bool]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_enable_strict_any_check(self) -> Optional[bool]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_enable_unawaited_awaitable_analysis(self) -> Optional[bool]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_saved_state_project(self) -> Optional[SavedStateProject]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_include_suppressed_errors(self) -> Optional[bool]:
        raise NotImplementedError()

    @abc.abstractmethod
    def get_only_privacy_errors(self) -> bool:
        raise NotImplementedError()

    def get_local_root(self) -> Optional[Path]:
        # Local root is relative to the global root; None when unset.
        relative_local_root = self.get_relative_local_root()
        if relative_local_root is None:
            return None
        return self.get_global_root() / relative_local_root

    def get_log_directory(self) -> Path:
        # Logs are namespaced under .pyre by the relative local root, if any.
        dot_pyre_directory = self.get_dot_pyre_directory()
        relative_local_root = self.get_relative_local_root()
        return (
            dot_pyre_directory
            if relative_local_root is None
            else dot_pyre_directory / relative_local_root
        )

    def get_existent_typeshed_search_paths(
        self,
    ) -> List[configuration_module.search_path.Element]:
        # No typeshed available (even after attempting download) -> no paths.
        typeshed_root = self.get_typeshed_location(download_if_needed=True)
        if typeshed_root is None:
            return []
        return [
            configuration_module.search_path.SimpleElement(str(element))
            for element in find_directories.find_typeshed_search_paths(typeshed_root)
        ]

    def get_existent_search_paths(
        self,
    ) -> List[configuration_module.search_path.Element]:
        # User-specified paths take precedence (listed first) over typeshed.
        return [
            *self.get_existent_user_specified_search_paths(),
            *self.get_existent_typeshed_search_paths(),
        ]
Base
python
getsentry__sentry
src/sentry/sentry_apps/api/bases/sentryapps.py
{ "start": 9823, "end": 10045 }
class ____(StaffPermissionMixin, SentryAppPermission):
    """Allows staff to access sentry app endpoints. Note that this is
    used for endpoints acting on a single sentry app only.

    Pure mixin composition: all behavior comes from StaffPermissionMixin
    layered over SentryAppPermission via the MRO; no overrides are added here.
    """

    pass
SentryAppAndStaffPermission
python
realpython__materials
python-312/error-messages/local_self.py
{ "start": 17, "end": 556 }
class ____:
    """Demo that deliberately raises a NameError whose message mimics
    Python 3.12's "Did you mean: 'self.<attr>'?" suggestion.

    NOTE(review): the trailing statement instantiates ``Greeter``, but this
    class is named ``____`` — presumably a masked placeholder for ``Greeter``;
    as written, that call would fail on the class name itself.
    """

    def __init__(self):
        # The attribute the demo "forgets" to qualify with self.
        self.message = "Hello"

    def greet(self, whom="World"):
        # Inspect this call's own frame to reach its local variables.
        frame = inspect.currentframe()
        wrong_name = "message"
        if "self" in frame.f_locals:
            # Re-binds self from the frame locals (a no-op; part of the demo).
            self = frame.f_locals["self"]
            if hasattr(self, wrong_name):
                # self.message exists, so this branch always raises with the
                # suggestion-style message.
                raise NameError(
                    (
                        f"name '{wrong_name}' is not defined. "
                        f"Did you mean: 'self.{wrong_name}'?"
                    )
                )


Greeter().greet()
Greeter
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/metadata/metadata_value.py
{ "start": 31966, "end": 32469 }
class ____(FieldSerializer):
    """Field serializer that discards its field: both directions return None,
    so the wrapped value never survives a serialization round trip."""

    def pack(self, value: Any, whitelist_map: WhitelistMap, descent_path: str) -> Any:
        # Drop the value on serialization.
        return None

    def unpack(self, value: Any, whitelist_map: WhitelistMap, context: UnpackContext) -> Any:
        # Nothing to restore on deserialization.
        return None


# NOTE(review): these decorators apply to the class defined immediately after
# this chunk (not visible here). The "instance" field is serialized via the
# null serializer above and omitted entirely when None.
@whitelist_for_serdes(
    field_serializers={"instance": NullFieldSerializer},
    kwargs_fields={"instance"},
    skip_when_none_fields={"instance"},
)
@record_custom(
    field_to_new_mapping={
        "class_name": "inst",
    }
)
NullFieldSerializer
python
langchain-ai__langchain
libs/core/langchain_core/load/serializable.py
{ "start": 329, "end": 673 }
class ____(TypedDict):
    """Base class for serialized objects.

    Common shape shared by serialized representations: a serialization-format
    version, a fully qualified identifier path, and optional name/graph
    metadata (the NotRequired keys may be absent from instances).
    """

    lc: int
    """The version of the serialization format."""
    id: list[str]
    """The unique identifier of the object."""
    name: NotRequired[str]
    """The name of the object."""
    graph: NotRequired[dict[str, Any]]
    """The graph of the object."""
BaseSerialized
python
matplotlib__matplotlib
lib/matplotlib/text.py
{ "start": 6202, "end": 51149 }
class ____(Artist): """Handle storing and drawing of text in window or data coordinates.""" zorder = 3 _charsize_cache = dict() def __repr__(self): return f"Text({self._x}, {self._y}, {self._text!r})" def __init__(self, x=0, y=0, text='', *, color=None, # defaults to rc params verticalalignment='baseline', horizontalalignment='left', multialignment=None, fontproperties=None, # defaults to FontProperties() rotation=None, linespacing=None, rotation_mode=None, usetex=None, # defaults to rcParams['text.usetex'] wrap=False, transform_rotates_text=False, parse_math=None, # defaults to rcParams['text.parse_math'] antialiased=None, # defaults to rcParams['text.antialiased'] **kwargs ): """ Create a `.Text` instance at *x*, *y* with string *text*. The text is aligned relative to the anchor point (*x*, *y*) according to ``horizontalalignment`` (default: 'left') and ``verticalalignment`` (default: 'baseline'). See also :doc:`/gallery/text_labels_and_annotations/text_alignment`. While Text accepts the 'label' keyword argument, by default it is not added to the handles of a legend. 
Valid keyword arguments are: %(Text:kwdoc)s """ super().__init__() self._x, self._y = x, y self._text = '' self._reset_visual_defaults( text=text, color=color, fontproperties=fontproperties, usetex=usetex, parse_math=parse_math, wrap=wrap, verticalalignment=verticalalignment, horizontalalignment=horizontalalignment, multialignment=multialignment, rotation=rotation, transform_rotates_text=transform_rotates_text, linespacing=linespacing, rotation_mode=rotation_mode, antialiased=antialiased ) self.update(kwargs) def _reset_visual_defaults( self, text='', color=None, fontproperties=None, usetex=None, parse_math=None, wrap=False, verticalalignment='baseline', horizontalalignment='left', multialignment=None, rotation=None, transform_rotates_text=False, linespacing=None, rotation_mode=None, antialiased=None ): self.set_text(text) self.set_color(mpl._val_or_rc(color, "text.color")) self.set_fontproperties(fontproperties) self.set_usetex(usetex) self.set_parse_math(mpl._val_or_rc(parse_math, 'text.parse_math')) self.set_wrap(wrap) self.set_verticalalignment(verticalalignment) self.set_horizontalalignment(horizontalalignment) self._multialignment = multialignment self.set_rotation(rotation) self._transform_rotates_text = transform_rotates_text self._bbox_patch = None # a FancyBboxPatch instance self._renderer = None if linespacing is None: linespacing = 1.2 # Maybe use rcParam later. self.set_linespacing(linespacing) self.set_rotation_mode(rotation_mode) self.set_antialiased(mpl._val_or_rc(antialiased, 'text.antialiased')) def update(self, kwargs): # docstring inherited ret = [] kwargs = cbook.normalize_kwargs(kwargs, Text) sentinel = object() # bbox can be None, so use another sentinel. # Update fontproperties first, as it has lowest priority. fontproperties = kwargs.pop("fontproperties", sentinel) if fontproperties is not sentinel: ret.append(self.set_fontproperties(fontproperties)) # Update bbox last, as it depends on font properties. 
bbox = kwargs.pop("bbox", sentinel) ret.extend(super().update(kwargs)) if bbox is not sentinel: ret.append(self.set_bbox(bbox)) return ret def __getstate__(self): d = super().__getstate__() # remove the cached _renderer (if it exists) d['_renderer'] = None return d def contains(self, mouseevent): """ Return whether the mouse event occurred inside the axis-aligned bounding-box of the text. """ if (self._different_canvas(mouseevent) or not self.get_visible() or self._renderer is None): return False, {} # Explicitly use Text.get_window_extent(self) and not # self.get_window_extent() so that Annotation.contains does not # accidentally cover the entire annotation bounding box. bbox = Text.get_window_extent(self) inside = (bbox.x0 <= mouseevent.x <= bbox.x1 and bbox.y0 <= mouseevent.y <= bbox.y1) cattr = {} # if the text has a surrounding patch, also check containment for it, # and merge the results with the results for the text. if self._bbox_patch: patch_inside, patch_cattr = self._bbox_patch.contains(mouseevent) inside = inside or patch_inside cattr["bbox_patch"] = patch_cattr return inside, cattr def _get_xy_display(self): """ Get the (possibly unit converted) transformed x, y in display coords. """ x, y = self.get_unitless_position() return self.get_transform().transform((x, y)) def _get_multialignment(self): if self._multialignment is not None: return self._multialignment else: return self._horizontalalignment def _char_index_at(self, x): """ Calculate the index closest to the coordinate x in display space. The position of text[index] is assumed to be the sum of the widths of all preceding characters text[:index]. This works only on single line texts. 
""" if not self._text: return 0 text = self._text fontproperties = str(self._fontproperties) if fontproperties not in Text._charsize_cache: Text._charsize_cache[fontproperties] = dict() charsize_cache = Text._charsize_cache[fontproperties] for char in set(text): if char not in charsize_cache: self.set_text(char) bb = self.get_window_extent() charsize_cache[char] = bb.x1 - bb.x0 self.set_text(text) bb = self.get_window_extent() size_accum = np.cumsum([0] + [charsize_cache[x] for x in text]) std_x = x - bb.x0 return (np.abs(size_accum - std_x)).argmin() def get_rotation(self): """Return the text angle in degrees between 0 and 360.""" if self.get_transform_rotates_text(): return self.get_transform().transform_angles( [self._rotation], [self.get_unitless_position()]).item(0) else: return self._rotation def get_transform_rotates_text(self): """ Return whether rotations of the transform affect the text direction. """ return self._transform_rotates_text def set_rotation_mode(self, m): """ Set text rotation mode. Parameters ---------- m : {None, 'default', 'anchor', 'xtick', 'ytick'} If ``"default"``, the text will be first rotated, then aligned according to their horizontal and vertical alignments. If ``"anchor"``, then alignment occurs before rotation. "xtick" and "ytick" adjust the horizontal/vertical alignment so that the text is visually pointing towards its anchor point. This is primarily used for rotated tick labels and positions them nicely towards their ticks. Passing ``None`` will set the rotation mode to ``"default"``. """ if m is None: m = "default" else: _api.check_in_list(("anchor", "default", "xtick", "ytick"), rotation_mode=m) self._rotation_mode = m self.stale = True def get_rotation_mode(self): """Return the text rotation mode.""" return self._rotation_mode def set_antialiased(self, antialiased): """ Set whether to use antialiased rendering. 
Parameters ---------- antialiased : bool Notes ----- Antialiasing will be determined by :rc:`text.antialiased` and the parameter *antialiased* will have no effect if the text contains math expressions. """ self._antialiased = antialiased self.stale = True def get_antialiased(self): """Return whether antialiased rendering is used.""" return self._antialiased def update_from(self, other): # docstring inherited super().update_from(other) self._color = other._color self._multialignment = other._multialignment self._verticalalignment = other._verticalalignment self._horizontalalignment = other._horizontalalignment self._fontproperties = other._fontproperties.copy() self._usetex = other._usetex self._rotation = other._rotation self._transform_rotates_text = other._transform_rotates_text self._picker = other._picker self._linespacing = other._linespacing self._antialiased = other._antialiased self.stale = True def _get_layout(self, renderer): """ Return the extent (bbox) of the text together with multiple-alignment information. Note that it returns an extent of a rotated text when necessary. """ thisx, thisy = 0.0, 0.0 lines = self._get_wrapped_text().split("\n") # Ensures lines is not empty. ws = [] hs = [] xs = [] ys = [] # Full vertical extent of font, including ascenders and descenders: _, lp_h, lp_d = _get_text_metrics_with_cache( renderer, "lp", self._fontproperties, ismath="TeX" if self.get_usetex() else False, dpi=self.get_figure(root=True).dpi) min_dy = (lp_h - lp_d) * self._linespacing for i, line in enumerate(lines): clean_line, ismath = self._preprocess_math(line) if clean_line: w, h, d = _get_text_metrics_with_cache( renderer, clean_line, self._fontproperties, ismath=ismath, dpi=self.get_figure(root=True).dpi) else: w = h = d = 0 # For multiline text, increase the line spacing when the text # net-height (excluding baseline) is larger than that of a "l" # (e.g., use of superscripts), which seems what TeX does. 
h = max(h, lp_h) d = max(d, lp_d) ws.append(w) hs.append(h) # Metrics of the last line that are needed later: baseline = (h - d) - thisy if i == 0: # position at baseline thisy = -(h - d) else: # put baseline a good distance from bottom of previous line thisy -= max(min_dy, (h - d) * self._linespacing) xs.append(thisx) # == 0. ys.append(thisy) thisy -= d # Metrics of the last line that are needed later: descent = d # Bounding box definition: width = max(ws) xmin = 0 xmax = width ymax = 0 ymin = ys[-1] - descent # baseline of last line minus its descent # get the rotation matrix M = Affine2D().rotate_deg(self.get_rotation()) # now offset the individual text lines within the box malign = self._get_multialignment() if malign == 'left': offset_layout = [(x, y) for x, y in zip(xs, ys)] elif malign == 'center': offset_layout = [(x + width / 2 - w / 2, y) for x, y, w in zip(xs, ys, ws)] elif malign == 'right': offset_layout = [(x + width - w, y) for x, y, w in zip(xs, ys, ws)] # the corners of the unrotated bounding box corners_horiz = np.array( [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]) # now rotate the bbox corners_rotated = M.transform(corners_horiz) # compute the bounds of the rotated box xmin = corners_rotated[:, 0].min() xmax = corners_rotated[:, 0].max() ymin = corners_rotated[:, 1].min() ymax = corners_rotated[:, 1].max() width = xmax - xmin height = ymax - ymin # Now move the box to the target position offset the display # bbox by alignment halign = self._horizontalalignment valign = self._verticalalignment rotation_mode = self.get_rotation_mode() if rotation_mode != "anchor": angle = self.get_rotation() if rotation_mode == 'xtick': halign = self._ha_for_angle(angle) elif rotation_mode == 'ytick': valign = self._va_for_angle(angle) # compute the text location in display coords and the offsets # necessary to align the bbox with that location if halign == 'center': offsetx = (xmin + xmax) / 2 elif halign == 'right': offsetx = xmax else: offsetx = 
xmin if valign == 'center': offsety = (ymin + ymax) / 2 elif valign == 'top': offsety = ymax elif valign == 'baseline': offsety = ymin + descent elif valign == 'center_baseline': offsety = ymin + height - baseline / 2.0 else: offsety = ymin else: xmin1, ymin1 = corners_horiz[0] xmax1, ymax1 = corners_horiz[2] if halign == 'center': offsetx = (xmin1 + xmax1) / 2.0 elif halign == 'right': offsetx = xmax1 else: offsetx = xmin1 if valign == 'center': offsety = (ymin1 + ymax1) / 2.0 elif valign == 'top': offsety = ymax1 elif valign == 'baseline': offsety = ymax1 - baseline elif valign == 'center_baseline': offsety = ymax1 - baseline / 2.0 else: offsety = ymin1 offsetx, offsety = M.transform((offsetx, offsety)) xmin -= offsetx ymin -= offsety bbox = Bbox.from_bounds(xmin, ymin, width, height) # now rotate the positions around the first (x, y) position xys = M.transform(offset_layout) - (offsetx, offsety) return bbox, list(zip(lines, zip(ws, hs), *xys.T)), descent def set_bbox(self, rectprops): """ Draw a box behind/around the text. This can be used to set a background and/or a frame around the text. It's realized through a `.FancyBboxPatch` behind the text (see also `.Text.get_bbox_patch`). The bbox patch is None by default and only created when needed. Parameters ---------- rectprops : dict with properties for `.FancyBboxPatch` or None The default boxstyle is 'square'. The mutation scale of the `.patches.FancyBboxPatch` is set to the fontsize. Pass ``None`` to remove the bbox patch completely. 
Examples -------- :: t.set_bbox(dict(facecolor='red', alpha=0.5)) """ if rectprops is not None: props = rectprops.copy() boxstyle = props.pop("boxstyle", None) pad = props.pop("pad", None) if boxstyle is None: boxstyle = "square" if pad is None: pad = 4 # points pad /= self.get_size() # to fraction of font size else: if pad is None: pad = 0.3 # boxstyle could be a callable or a string if isinstance(boxstyle, str) and "pad" not in boxstyle: boxstyle += ",pad=%0.2f" % pad self._bbox_patch = FancyBboxPatch( (0, 0), 1, 1, boxstyle=boxstyle, transform=IdentityTransform(), **props) else: self._bbox_patch = None self._update_clip_properties() def get_bbox_patch(self): """ Return the bbox Patch, or None if the `.patches.FancyBboxPatch` is not made. For more details see `.Text.set_bbox`. """ return self._bbox_patch def update_bbox_position_size(self, renderer): """ Update the location and the size of the bbox. This method should be used when the position and size of the bbox needs to be updated before actually drawing the bbox. """ if self._bbox_patch: # don't use self.get_unitless_position here, which refers to text # position in Text: posx = float(self.convert_xunits(self._x)) posy = float(self.convert_yunits(self._y)) posx, posy = self.get_transform().transform((posx, posy)) x_box, y_box, w_box, h_box = _get_textbox(self, renderer) self._bbox_patch.set_bounds(0., 0., w_box, h_box) self._bbox_patch.set_transform( Affine2D() .rotate_deg(self.get_rotation()) .translate(posx + x_box, posy + y_box)) fontsize_in_pixel = renderer.points_to_pixels(self.get_size()) self._bbox_patch.set_mutation_scale(fontsize_in_pixel) def _update_clip_properties(self): if self._bbox_patch: clipprops = dict(clip_box=self.clipbox, clip_path=self._clippath, clip_on=self._clipon) self._bbox_patch.update(clipprops) def set_clip_box(self, clipbox): # docstring inherited. super().set_clip_box(clipbox) self._update_clip_properties() def set_clip_path(self, path, transform=None): # docstring inherited. 
super().set_clip_path(path, transform) self._update_clip_properties() def set_clip_on(self, b): # docstring inherited. super().set_clip_on(b) self._update_clip_properties() def get_wrap(self): """Return whether the text can be wrapped.""" return self._wrap def set_wrap(self, wrap): """ Set whether the text can be wrapped. Wrapping makes sure the text is confined to the (sub)figure box. It does not take into account any other artists. Parameters ---------- wrap : bool Notes ----- Wrapping does not work together with ``savefig(..., bbox_inches='tight')`` (which is also used internally by ``%matplotlib inline`` in IPython/Jupyter). The 'tight' setting rescales the canvas to accommodate all content and happens before wrapping. """ self._wrap = wrap def _get_wrap_line_width(self): """ Return the maximum line width for wrapping text based on the current orientation. """ x0, y0 = self.get_transform().transform(self.get_position()) figure_box = self.get_figure().get_window_extent() # Calculate available width based on text alignment alignment = self.get_horizontalalignment() self.set_rotation_mode('anchor') rotation = self.get_rotation() left = self._get_dist_to_box(rotation, x0, y0, figure_box) right = self._get_dist_to_box( (180 + rotation) % 360, x0, y0, figure_box) if alignment == 'left': line_width = left elif alignment == 'right': line_width = right else: line_width = 2 * min(left, right) return line_width def _get_dist_to_box(self, rotation, x0, y0, figure_box): """ Return the distance from the given points to the boundaries of a rotated box, in pixels. 
""" if rotation > 270: quad = rotation - 270 h1 = (y0 - figure_box.y0) / math.cos(math.radians(quad)) h2 = (figure_box.x1 - x0) / math.cos(math.radians(90 - quad)) elif rotation > 180: quad = rotation - 180 h1 = (x0 - figure_box.x0) / math.cos(math.radians(quad)) h2 = (y0 - figure_box.y0) / math.cos(math.radians(90 - quad)) elif rotation > 90: quad = rotation - 90 h1 = (figure_box.y1 - y0) / math.cos(math.radians(quad)) h2 = (x0 - figure_box.x0) / math.cos(math.radians(90 - quad)) else: h1 = (figure_box.x1 - x0) / math.cos(math.radians(rotation)) h2 = (figure_box.y1 - y0) / math.cos(math.radians(90 - rotation)) return min(h1, h2) def _get_rendered_text_width(self, text): """ Return the width of a given text string, in pixels. """ w, h, d = _get_text_metrics_with_cache( self._renderer, text, self.get_fontproperties(), cbook.is_math_text(text), self.get_figure(root=True).dpi) return math.ceil(w) def _get_wrapped_text(self): """ Return a copy of the text string with new lines added so that the text is wrapped relative to the parent figure (if `get_wrap` is True). """ if not self.get_wrap(): return self.get_text() # Not fit to handle breaking up latex syntax correctly, so # ignore latex for now. 
if self.get_usetex(): return self.get_text() # Build the line incrementally, for a more accurate measure of length line_width = self._get_wrap_line_width() wrapped_lines = [] # New lines in the user's text force a split unwrapped_lines = self.get_text().split('\n') # Now wrap each individual unwrapped line for unwrapped_line in unwrapped_lines: sub_words = unwrapped_line.split(' ') # Remove items from sub_words as we go, so stop when empty while len(sub_words) > 0: if len(sub_words) == 1: # Only one word, so just add it to the end wrapped_lines.append(sub_words.pop(0)) continue for i in range(2, len(sub_words) + 1): # Get width of all words up to and including here line = ' '.join(sub_words[:i]) current_width = self._get_rendered_text_width(line) # If all these words are too wide, append all not including # last word if current_width > line_width: wrapped_lines.append(' '.join(sub_words[:i - 1])) sub_words = sub_words[i - 1:] break # Otherwise if all words fit in the width, append them all elif i == len(sub_words): wrapped_lines.append(' '.join(sub_words[:i])) sub_words = [] break return '\n'.join(wrapped_lines) @artist.allow_rasterization def draw(self, renderer): # docstring inherited if renderer is not None: self._renderer = renderer if not self.get_visible(): return if self.get_text() == '': return renderer.open_group('text', self.get_gid()) with self._cm_set(text=self._get_wrapped_text()): bbox, info, descent = self._get_layout(renderer) trans = self.get_transform() # don't use self.get_position here, which refers to text # position in Text: x, y = self._x, self._y if np.ma.is_masked(x): x = np.nan if np.ma.is_masked(y): y = np.nan posx = float(self.convert_xunits(x)) posy = float(self.convert_yunits(y)) posx, posy = trans.transform((posx, posy)) if np.isnan(posx) or np.isnan(posy): return # don't throw a warning here if not np.isfinite(posx) or not np.isfinite(posy): _log.warning("posx and posy should be finite values") return canvasw, canvash = 
renderer.get_canvas_width_height() # Update the location and size of the bbox # (`.patches.FancyBboxPatch`), and draw it. if self._bbox_patch: self.update_bbox_position_size(renderer) self._bbox_patch.draw(renderer) gc = renderer.new_gc() gc.set_foreground(self.get_color()) gc.set_alpha(self.get_alpha()) gc.set_url(self._url) gc.set_antialiased(self._antialiased) self._set_gc_clip(gc) angle = self.get_rotation() for line, wh, x, y in info: mtext = self if len(info) == 1 else None x = x + posx y = y + posy if renderer.flipy(): y = canvash - y clean_line, ismath = self._preprocess_math(line) if self.get_path_effects(): from matplotlib.patheffects import PathEffectRenderer textrenderer = PathEffectRenderer( self.get_path_effects(), renderer) else: textrenderer = renderer if self.get_usetex(): textrenderer.draw_tex(gc, x, y, clean_line, self._fontproperties, angle, mtext=mtext) else: textrenderer.draw_text(gc, x, y, clean_line, self._fontproperties, angle, ismath=ismath, mtext=mtext) gc.restore() renderer.close_group('text') self.stale = False def get_color(self): """Return the color of the text.""" return self._color def get_fontproperties(self): """Return the `.font_manager.FontProperties`.""" return self._fontproperties def get_fontfamily(self): """ Return the list of font families used for font lookup. See Also -------- .font_manager.FontProperties.get_family """ return self._fontproperties.get_family() def get_fontname(self): """ Return the font name as a string. See Also -------- .font_manager.FontProperties.get_name """ return self._fontproperties.get_name() def get_fontstyle(self): """ Return the font style as a string. See Also -------- .font_manager.FontProperties.get_style """ return self._fontproperties.get_style() def get_fontsize(self): """ Return the font size as an integer. 
See Also -------- .font_manager.FontProperties.get_size_in_points """ return self._fontproperties.get_size_in_points() def get_fontvariant(self): """ Return the font variant as a string. See Also -------- .font_manager.FontProperties.get_variant """ return self._fontproperties.get_variant() def get_fontweight(self): """ Return the font weight as a string or a number. See Also -------- .font_manager.FontProperties.get_weight """ return self._fontproperties.get_weight() def get_stretch(self): """ Return the font stretch as a string or a number. See Also -------- .font_manager.FontProperties.get_stretch """ return self._fontproperties.get_stretch() def get_horizontalalignment(self): """ Return the horizontal alignment as a string. Will be one of 'left', 'center' or 'right'. """ return self._horizontalalignment def get_unitless_position(self): """Return the (x, y) unitless position of the text.""" # This will get the position with all unit information stripped away. # This is here for convenience since it is done in several locations. x = float(self.convert_xunits(self._x)) y = float(self.convert_yunits(self._y)) return x, y def get_position(self): """Return the (x, y) position of the text.""" # This should return the same data (possible unitized) as was # specified with 'set_x' and 'set_y'. return self._x, self._y def get_text(self): """Return the text string.""" return self._text def get_verticalalignment(self): """ Return the vertical alignment as a string. Will be one of 'top', 'center', 'bottom', 'baseline' or 'center_baseline'. """ return self._verticalalignment def get_window_extent(self, renderer=None, dpi=None): """ Return the `.Bbox` bounding the text, in display units. In addition to being used internally, this is useful for specifying clickable regions in a png file on a web page. Parameters ---------- renderer : Renderer, optional A renderer is needed to compute the bounding box. 
If the artist has already been drawn, the renderer is cached; thus, it is only necessary to pass this argument when calling `get_window_extent` before the first draw. In practice, it is usually easier to trigger a draw first, e.g. by calling `~.Figure.draw_without_rendering` or ``plt.show()``. dpi : float, optional The dpi value for computing the bbox, defaults to ``self.get_figure(root=True).dpi`` (*not* the renderer dpi); should be set e.g. if to match regions with a figure saved with a custom dpi value. """ if not self.get_visible(): return Bbox.unit() fig = self.get_figure(root=True) if dpi is None: dpi = fig.dpi if self.get_text() == '': with cbook._setattr_cm(fig, dpi=dpi): tx, ty = self._get_xy_display() return Bbox.from_bounds(tx, ty, 0, 0) if renderer is not None: self._renderer = renderer if self._renderer is None: self._renderer = fig._get_renderer() if self._renderer is None: raise RuntimeError( "Cannot get window extent of text w/o renderer. You likely " "want to call 'figure.draw_without_rendering()' first.") with cbook._setattr_cm(fig, dpi=dpi): bbox, info, descent = self._get_layout(self._renderer) x, y = self.get_unitless_position() x, y = self.get_transform().transform((x, y)) bbox = bbox.translated(x, y) return bbox def set_backgroundcolor(self, color): """ Set the background color of the text. This is realized through the bbox (see `.set_bbox`), creating the bbox patch if needed. Parameters ---------- color : :mpltype:`color` See Also -------- .set_bbox : To change the position of the bounding box """ if self._bbox_patch is None: self.set_bbox(dict(facecolor=color, edgecolor=color)) else: self._bbox_patch.update(dict(facecolor=color)) self._update_clip_properties() self.stale = True def set_color(self, color): """ Set the foreground color of the text Parameters ---------- color : :mpltype:`color` """ # "auto" is only supported by axisartist, but we can just let it error # out at draw time for simplicity. 
if not cbook._str_equal(color, "auto"): mpl.colors._check_color_like(color=color) self._color = color self.stale = True def set_horizontalalignment(self, align): """ Set the horizontal alignment relative to the anchor point. See also :doc:`/gallery/text_labels_and_annotations/text_alignment`. Parameters ---------- align : {'left', 'center', 'right'} """ _api.check_in_list(['center', 'right', 'left'], align=align) self._horizontalalignment = align self.stale = True def set_multialignment(self, align): """ Set the text alignment for multiline texts. The layout of the bounding box of all the lines is determined by the horizontalalignment and verticalalignment properties. This property controls the alignment of the text lines within that box. Parameters ---------- align : {'left', 'right', 'center'} """ _api.check_in_list(['center', 'right', 'left'], align=align) self._multialignment = align self.stale = True def set_linespacing(self, spacing): """ Set the line spacing as a multiple of the font size. The default line spacing is 1.2. Parameters ---------- spacing : float (multiple of font size) """ _api.check_isinstance(Real, spacing=spacing) self._linespacing = spacing self.stale = True def set_fontfamily(self, fontname): """ Set the font family. Can be either a single string, or a list of strings in decreasing priority. Each string may be either a real font name or a generic font class name. If the latter, the specific font names will be looked up in the corresponding rcParams. If a `Text` instance is constructed with ``fontfamily=None``, then the font is set to :rc:`font.family`, and the same is done when `set_fontfamily()` is called on an existing `Text` instance. Parameters ---------- fontname : {FONTNAME, 'serif', 'sans-serif', 'cursive', 'fantasy', \ 'monospace'} See Also -------- .font_manager.FontProperties.set_family """ self._fontproperties.set_family(fontname) self.stale = True def set_fontvariant(self, variant): """ Set the font variant. 
Parameters ---------- variant : {'normal', 'small-caps'} See Also -------- .font_manager.FontProperties.set_variant """ self._fontproperties.set_variant(variant) self.stale = True def set_fontstyle(self, fontstyle): """ Set the font style. Parameters ---------- fontstyle : {'normal', 'italic', 'oblique'} See Also -------- .font_manager.FontProperties.set_style """ self._fontproperties.set_style(fontstyle) self.stale = True def set_fontsize(self, fontsize): """ Set the font size. Parameters ---------- fontsize : float or {'xx-small', 'x-small', 'small', 'medium', \ 'large', 'x-large', 'xx-large'} If a float, the fontsize in points. The string values denote sizes relative to the default font size. See Also -------- .font_manager.FontProperties.set_size """ self._fontproperties.set_size(fontsize) self.stale = True def get_math_fontfamily(self): """ Return the font family name for math text rendered by Matplotlib. The default value is :rc:`mathtext.fontset`. See Also -------- set_math_fontfamily """ return self._fontproperties.get_math_fontfamily() def set_math_fontfamily(self, fontfamily): """ Set the font family for math text rendered by Matplotlib. This does only affect Matplotlib's own math renderer. It has no effect when rendering with TeX (``usetex=True``). Parameters ---------- fontfamily : str The name of the font family. Available font families are defined in the :ref:`default matplotlibrc file <customizing-with-matplotlibrc-files>`. See Also -------- get_math_fontfamily """ self._fontproperties.set_math_fontfamily(fontfamily) def set_fontweight(self, weight): """ Set the font weight. 
Parameters ---------- weight : {a numeric value in range 0-1000, 'ultralight', 'light', \ 'normal', 'regular', 'book', 'medium', 'roman', 'semibold', 'demibold', \ 'demi', 'bold', 'heavy', 'extra bold', 'black'} See Also -------- .font_manager.FontProperties.set_weight """ self._fontproperties.set_weight(weight) self.stale = True def set_fontstretch(self, stretch): """ Set the font stretch (horizontal condensation or expansion). Parameters ---------- stretch : {a numeric value in range 0-1000, 'ultra-condensed', \ 'extra-condensed', 'condensed', 'semi-condensed', 'normal', 'semi-expanded', \ 'expanded', 'extra-expanded', 'ultra-expanded'} See Also -------- .font_manager.FontProperties.set_stretch """ self._fontproperties.set_stretch(stretch) self.stale = True def set_position(self, xy): """ Set the (*x*, *y*) position of the text. Parameters ---------- xy : (float, float) """ self.set_x(xy[0]) self.set_y(xy[1]) def set_x(self, x): """ Set the *x* position of the text. Parameters ---------- x : float """ self._x = x self.stale = True def set_y(self, y): """ Set the *y* position of the text. Parameters ---------- y : float """ self._y = y self.stale = True def set_rotation(self, s): """ Set the rotation of the text. Parameters ---------- s : float or {'vertical', 'horizontal'} The rotation angle in degrees in mathematically positive direction (counterclockwise). 'horizontal' equals 0, 'vertical' equals 90. """ if isinstance(s, Real): self._rotation = float(s) % 360 elif cbook._str_equal(s, 'horizontal') or s is None: self._rotation = 0. elif cbook._str_equal(s, 'vertical'): self._rotation = 90. else: raise ValueError("rotation must be 'vertical', 'horizontal' or " f"a number, not {s}") self.stale = True def set_transform_rotates_text(self, t): """ Whether rotations of the transform affect the text direction. 
Parameters ---------- t : bool """ self._transform_rotates_text = t self.stale = True def set_verticalalignment(self, align): """ Set the vertical alignment relative to the anchor point. See also :doc:`/gallery/text_labels_and_annotations/text_alignment`. Parameters ---------- align : {'baseline', 'bottom', 'center', 'center_baseline', 'top'} """ _api.check_in_list( ['top', 'bottom', 'center', 'baseline', 'center_baseline'], align=align) self._verticalalignment = align self.stale = True def set_text(self, s): r""" Set the text string *s*. It may contain newlines (``\n``) or math in LaTeX syntax. Parameters ---------- s : object Any object gets converted to its `str` representation, except for ``None`` which is converted to an empty string. """ s = '' if s is None else str(s) if s != self._text: self._text = s self.stale = True def _preprocess_math(self, s): """ Return the string *s* after mathtext preprocessing, and the kind of mathtext support needed. - If *self* is configured to use TeX, return *s* unchanged except that a single space gets escaped, and the flag "TeX". - Otherwise, if *s* is mathtext (has an even number of unescaped dollar signs) and ``parse_math`` is not set to False, return *s* and the flag True. - Otherwise, return *s* with dollar signs unescaped, and the flag False. """ if self.get_usetex(): if s == " ": s = r"\ " return s, "TeX" elif not self.get_parse_math(): return s, False elif cbook.is_math_text(s): return s, True else: return s.replace(r"\$", "$"), False def set_fontproperties(self, fp): """ Set the font properties that control the text. Parameters ---------- fp : `.font_manager.FontProperties` or `str` or `pathlib.Path` If a `str`, it is interpreted as a fontconfig pattern parsed by `.FontProperties`. If a `pathlib.Path`, it is interpreted as the absolute path to a font file. 
""" self._fontproperties = FontProperties._from_any(fp).copy() self.stale = True @_docstring.kwarg_doc("bool, default: :rc:`text.usetex`") def set_usetex(self, usetex): """ Parameters ---------- usetex : bool or None Whether to render using TeX, ``None`` means to use :rc:`text.usetex`. """ self._usetex = bool(mpl._val_or_rc(usetex, 'text.usetex')) self.stale = True def get_usetex(self): """Return whether this `Text` object uses TeX for rendering.""" return self._usetex def set_parse_math(self, parse_math): """ Override switch to disable any mathtext parsing for this `Text`. Parameters ---------- parse_math : bool If False, this `Text` will never use mathtext. If True, mathtext will be used if there is an even number of unescaped dollar signs. """ self._parse_math = bool(parse_math) def get_parse_math(self): """Return whether mathtext parsing is considered for this `Text`.""" return self._parse_math def set_fontname(self, fontname): """ Alias for `set_fontfamily`. One-way alias only: the getter differs. Parameters ---------- fontname : {FONTNAME, 'serif', 'sans-serif', 'cursive', 'fantasy', \ 'monospace'} See Also -------- .font_manager.FontProperties.set_family """ self.set_fontfamily(fontname) def _ha_for_angle(self, angle): """ Determines horizontal alignment ('ha') for rotation_mode "xtick" based on the angle of rotation in degrees and the vertical alignment. """ anchor_at_bottom = self.get_verticalalignment() == 'bottom' if (angle <= 10 or 85 <= angle <= 95 or 350 <= angle or 170 <= angle <= 190 or 265 <= angle <= 275): return 'center' elif 10 < angle < 85 or 190 < angle < 265: return 'left' if anchor_at_bottom else 'right' return 'right' if anchor_at_bottom else 'left' def _va_for_angle(self, angle): """ Determines vertical alignment ('va') for rotation_mode "ytick" based on the angle of rotation in degrees and the horizontal alignment. 
""" anchor_at_left = self.get_horizontalalignment() == 'left' if (angle <= 10 or 350 <= angle or 170 <= angle <= 190 or 80 <= angle <= 100 or 260 <= angle <= 280): return 'center' elif 190 < angle < 260 or 10 < angle < 80: return 'baseline' if anchor_at_left else 'top' return 'top' if anchor_at_left else 'baseline'
Text
python
walkccc__LeetCode
solutions/696. Count Binary Substrings/696.py
{ "start": 0, "end": 328 }
class ____: def countBinarySubstrings(self, s: str) -> int: ans = 0 prevCount = 0 equals = 1 for i in range(len(s) - 1): if s[i] == s[i + 1]: equals += 1 else: ans += min(prevCount, equals) prevCount = equals equals = 1 return ans + min(prevCount, equals)
Solution
python
PyCQA__pylint
tests/functional/s/slots_checks.py
{ "start": 1946, "end": 2015 }
class ____:
    # NOTE(review): this lives in a pylint functional-test fixture for the
    # slots checker — presumably it exercises a __slots__ tuple that mixes a
    # plain string literal with a value computed at class-creation time
    # (deque.__name__ evaluates to the string 'deque'); confirm against the
    # checker's expected-messages file.
    __slots__ = ('a', deque.__name__)
PotentiallySecondGood
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_reproduce_failure.py
{ "start": 5273, "end": 6298 }
class ____: def __repr__(self): return "not a valid python expression" def test_does_not_print_reproduction_if_told_not_to(): @settings(print_blob=False) @given(st.integers().map(lambda x: Foo())) def test(i): raise ValueError with capture_out() as o, pytest.raises(ValueError): test() assert "@reproduce_failure" not in o.getvalue() def test_raises_invalid_if_wrong_version(): b = b"hello world" n = len(b) @reproduce_failure("1.0.0", encode_failure([b])) @given(st.binary(min_size=n, max_size=n)) def test(x): pass with pytest.raises(InvalidArgument): test() def test_does_not_print_reproduction_if_verbosity_set_to_quiet(): @given(st.data()) @settings(verbosity=Verbosity.quiet, print_blob=False) def test_always_fails(data): assert data.draw(st.just(False)) with capture_out() as out, pytest.raises(AssertionError): test_always_fails() assert "@reproduce_failure" not in out.getvalue()
Foo
python
fluentpython__example-code-2e
16-op-overloading/vector_v6.py
{ "start": 5681, "end": 8696 }
class ____:
    """A multidimensional vector backed by a flat ``array`` of doubles.

    Supports iteration, slicing, hashing, equality, formatting (including a
    hyperspherical ``'h'`` format suffix), unary ``-``/``+``, and ``+`` with
    any iterable of numbers (zero-padded to the longer length).

    NOTE(review): several methods construct the result via the name
    ``Vector``; in this masked record the class is named ``____``, so those
    calls resolve only if ``Vector`` is bound elsewhere — confirm.
    """

    # array typecode: 'd' = C double.
    typecode = 'd'

    def __init__(self, components):
        """Build from any iterable of numbers; stored as a compact array."""
        self._components = array(self.typecode, components)

    def __iter__(self):
        return iter(self._components)

    def __repr__(self):
        """Abbreviated repr, e.g. ``Vector([3.0, 4.0, ...])`` for long vectors."""
        components = reprlib.repr(self._components)
        # Strip the "array('d'," prefix and trailing ")" from the array repr.
        components = components[components.find('['):-1]
        return f'Vector({components})'

    def __str__(self):
        return str(tuple(self))

    def __bytes__(self):
        # First byte is the typecode; the rest is the raw component data.
        return (bytes([ord(self.typecode)]) + bytes(self._components))

    def __eq__(self, other):
        # Lazy pairwise comparison; length check first avoids zip truncation.
        return (len(self) == len(other) and all(a == b for a, b in zip(self, other)))

    def __hash__(self):
        # XOR-fold the component hashes (0 is the identity for empty vectors).
        hashes = (hash(x) for x in self)
        return functools.reduce(operator.xor, hashes, 0)

    # tag::VECTOR_V6_UNARY[]
    def __abs__(self):
        """Euclidean norm of the vector."""
        return math.hypot(*self)

    def __neg__(self):
        return Vector(-x for x in self)  # <1>

    def __pos__(self):
        return Vector(self)  # <2>
    # end::VECTOR_V6_UNARY[]

    def __bool__(self):
        # A vector is falsy only when its magnitude is zero.
        return bool(abs(self))

    def __len__(self):
        return len(self._components)

    def __getitem__(self, key):
        """Slices return a new vector of the same class; ints return a float."""
        if isinstance(key, slice):
            cls = type(self)
            return cls(self._components[key])
        index = operator.index(key)
        return self._components[index]

    # Also enables the x/y/z/t shortcut attributes read by __getattr__ below.
    __match_args__ = ('x', 'y', 'z', 't')

    def __getattr__(self, name):
        """Map ``v.x`` .. ``v.t`` to the first four components, if present."""
        cls = type(self)
        try:
            pos = cls.__match_args__.index(name)
        except ValueError:
            pos = -1
        if 0 <= pos < len(self._components):
            return self._components[pos]
        msg = f'{cls.__name__!r} object has no attribute {name!r}'
        raise AttributeError(msg)

    def angle(self, n):
        """Return the n-th hyperspherical angular coordinate (n >= 1)."""
        r = math.hypot(*self[n:])
        a = math.atan2(r, self[n-1])
        # Last angle ranges over [0, 2*pi); flip when the final component is negative.
        if (n == len(self) - 1) and (self[-1] < 0):
            return math.pi * 2 - a
        else:
            return a

    def angles(self):
        """Lazily yield all angular coordinates."""
        return (self.angle(n) for n in range(1, len(self)))

    def __format__(self, fmt_spec=''):
        """Format components; an 'h' suffix selects hyperspherical coordinates."""
        if fmt_spec.endswith('h'):  # hyperspherical coordinates
            fmt_spec = fmt_spec[:-1]
            coords = itertools.chain([abs(self)], self.angles())
            outer_fmt = '<{}>'
        else:
            coords = self
            outer_fmt = '({})'
        components = (format(c, fmt_spec) for c in coords)
        return outer_fmt.format(', '.join(components))

    @classmethod
    def frombytes(cls, octets):
        """Inverse of __bytes__: rebuild a vector from its byte serialization."""
        typecode = chr(octets[0])
        # Zero-copy reinterpretation of the remaining bytes as doubles.
        memv = memoryview(octets[1:]).cast(typecode)
        return cls(memv)

    # tag::VECTOR_V6_ADD[]
    def __add__(self, other):
        """Element-wise sum with any iterable of numbers, zero-padded."""
        try:
            pairs = itertools.zip_longest(self, other, fillvalue=0.0)
            return Vector(a + b for a, b in pairs)
        except TypeError:
            # Non-iterable / non-numeric operand: defer to the other operand.
            return NotImplemented

    def __radd__(self, other):
        return self + other
    # end::VECTOR_V6_ADD[]
Vector
python
plotly__plotly.py
plotly/graph_objs/layout/map/layer/symbol/_textfont.py
{ "start": 235, "end": 5888 }
class ____(_BaseLayoutHierarchyType): _parent_path_str = "layout.map.layer.symbol" _path_str = "layout.map.layer.symbol.textfont" _valid_props = {"color", "family", "size", "style", "weight"} @property def color(self): """ The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def family(self): """ HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. The 'family' property is a string and must be specified as: - A non-empty string Returns ------- str """ return self["family"] @family.setter def family(self, val): self["family"] = val @property def size(self): """ The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] Returns ------- int|float """ return self["size"] @size.setter def size(self, val): self["size"] = val @property def style(self): """ Sets whether a font should be styled with a normal or italic face from its family. The 'style' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'italic'] Returns ------- Any """ return self["style"] @style.setter def style(self, val): self["style"] = val @property def weight(self): """ Sets the weight (or boldness) of the font. The 'weight' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [1, 1000] OR exactly one of ['normal', 'bold'] (e.g. 
'bold') Returns ------- int """ return self["weight"] @weight.setter def weight(self, val): self["weight"] = val @property def _prop_descriptions(self): return """\ color family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. size style Sets whether a font should be styled with a normal or italic face from its family. weight Sets the weight (or boldness) of the font. """ def __init__( self, arg=None, color=None, family=None, size=None, style=None, weight=None, **kwargs, ): """ Construct a new Textfont object Sets the icon text font (color=map.layer.paint.text-color, size=map.layer.layout.text-size). Has an effect only when `type` is set to "symbol". Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.layout.map.lay er.symbol.Textfont` color family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. size style Sets whether a font should be styled with a normal or italic face from its family. weight Sets the weight (or boldness) of the font. 
Returns ------- Textfont """ super().__init__("textfont") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.layout.map.layer.symbol.Textfont constructor must be a dict or an instance of :class:`plotly.graph_objs.layout.map.layer.symbol.Textfont`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("color", arg, color) self._set_property("family", arg, family) self._set_property("size", arg, size) self._set_property("style", arg, style) self._set_property("weight", arg, weight) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Textfont
python
mkdocstrings__mkdocstrings
src/mkdocstrings/_internal/debug.py
{ "start": 151, "end": 318 }
class ____: """Dataclass describing an environment variable.""" name: str """Variable name.""" value: str """Variable value.""" @dataclass
_Variable
python
gevent__gevent
src/gevent/pywsgi.py
{ "start": 60849, "end": 69235 }
class ____(StreamServer): """ A WSGI server based on :class:`StreamServer` that supports HTTPS. :keyword log: If given, an object with a ``write`` method to which request (access) logs will be written. If not given, defaults to :obj:`sys.stderr`. You may pass ``None`` to disable request logging. You may use a wrapper, around e.g., :mod:`logging`, to support objects that don't implement a ``write`` method. (If you pass a :class:`~logging.Logger` instance, or in general something that provides a ``log`` method but not a ``write`` method, such a wrapper will automatically be created and it will be logged to at the :data:`~logging.INFO` level.) :keyword error_log: If given, a file-like object with ``write``, ``writelines`` and ``flush`` methods to which error logs will be written. If not given, defaults to :obj:`sys.stderr`. You may pass ``None`` to disable error logging (not recommended). You may use a wrapper, around e.g., :mod:`logging`, to support objects that don't implement the proper methods. This parameter will become the value for ``wsgi.errors`` in the WSGI environment (if not already set). (As with *log*, wrappers for :class:`~logging.Logger` instances and the like will be created automatically and logged to at the :data:`~logging.ERROR` level.) .. seealso:: :class:`LoggingLogAdapter` See important warnings before attempting to use :mod:`logging`. .. versionchanged:: 1.1a3 Added the ``error_log`` parameter, and set ``wsgi.errors`` in the WSGI environment to this value. .. versionchanged:: 1.1a3 Add support for passing :class:`logging.Logger` objects to the ``log`` and ``error_log`` arguments. .. versionchanged:: 20.6.0 Passing a ``handle`` kwarg to the constructor is now officially deprecated. """ #: A callable taking three arguments: (socket, address, server) and returning #: an object with a ``handle()`` method. The callable is called once for #: each incoming socket request, as is its handle method. 
The handle method should not #: return until all use of the socket is complete. #: #: This class uses the :class:`WSGIHandler` object as the default value. You may #: subclass this class and set a different default value, or you may pass #: a value to use in the ``handler_class`` keyword constructor argument. handler_class = WSGIHandler #: The object to which request logs will be written. #: It must never be None. Initialized from the ``log`` constructor #: parameter. log = None #: The object to which error logs will be written. #: It must never be None. Initialized from the ``error_log`` constructor #: parameter. error_log = None #: The class of environ objects passed to the handlers. #: Must be a dict subclass. For compliance with :pep:`3333` #: and libraries like WebOb, this is simply :class:`dict` #: but this can be customized in a subclass or per-instance #: (probably to :class:`WSGISecureEnviron`). #: #: .. versionadded:: 1.2a1 environ_class = dict # Undocumented internal detail: the class that WSGIHandler._log_error # will cast to before passing to the loop. secure_environ_class = WSGISecureEnviron base_env = {'GATEWAY_INTERFACE': 'CGI/1.1', 'SERVER_SOFTWARE': 'gevent/%d.%d Python/%d.%d' % (gevent.version_info[:2] + sys.version_info[:2]), 'SCRIPT_NAME': '', 'wsgi.version': (1, 0), 'wsgi.multithread': False, # XXX: Aren't we really, though? 'wsgi.multiprocess': False, 'wsgi.run_once': False} def __init__(self, listener, application=None, backlog=None, spawn='default', log='default', error_log='default', handler_class=None, environ=None, **ssl_args): if 'handle' in ssl_args: # The ultimate base class (BaseServer) uses 'handle' for # the thing we call 'application'. We never deliberately # bass a `handle` argument to the base class, but one # could sneak in through ``**ssl_args``, even though that # is not the intent, while application is None. That # causes our own ``def handle`` method to be replaced, # probably leading to bad results. 
Passing a 'handle' # instead of an 'application' can really confuse things. import warnings warnings.warn("Passing 'handle' kwarg to WSGIServer is deprecated. " "Did you mean application?", DeprecationWarning, stacklevel=2) StreamServer.__init__(self, listener, backlog=backlog, spawn=spawn, **ssl_args) if application is not None: self.application = application if handler_class is not None: self.handler_class = handler_class # Note that we can't initialize these as class variables: # sys.stderr might get monkey patched at runtime. def _make_log(l, level=20): if l == 'default': return sys.stderr if l is None: return _NoopLog() if not hasattr(l, 'write') and hasattr(l, 'log'): return LoggingLogAdapter(l, level) return l self.log = _make_log(log) self.error_log = _make_log(error_log, 40) # logging.ERROR self.set_environ(environ) self.set_max_accept() def set_environ(self, environ=None): if environ is not None: self.environ = environ environ_update = getattr(self, 'environ', None) self.environ = self.environ_class(self.base_env) if self.ssl_enabled: self.environ['wsgi.url_scheme'] = 'https' else: self.environ['wsgi.url_scheme'] = 'http' if environ_update is not None: self.environ.update(environ_update) if self.environ.get('wsgi.errors') is None: self.environ['wsgi.errors'] = self.error_log def set_max_accept(self): if self.environ.get('wsgi.multiprocess'): self.max_accept = 1 def get_environ(self): return self.environ_class(self.environ) def init_socket(self): StreamServer.init_socket(self) self.update_environ() def update_environ(self): """ Called before the first request is handled to fill in WSGI environment values. This includes getting the correct server name and port. 
""" address = self.address if isinstance(address, tuple): if 'SERVER_NAME' not in self.environ: try: name = socket.getfqdn(address[0]) except socket.error: name = str(address[0]) if not isinstance(name, str): name = name.decode('ascii') self.environ['SERVER_NAME'] = name self.environ.setdefault('SERVER_PORT', str(address[1])) else: self.environ.setdefault('SERVER_NAME', '') self.environ.setdefault('SERVER_PORT', '') def handle(self, sock, address): """ Create an instance of :attr:`handler_class` to handle the request. This method blocks until the handler returns. """ # pylint:disable=method-hidden handler = self.handler_class(sock, address, self) handler.handle() def _main(): # Provisional main handler, for quick tests, not production # usage. from gevent import monkey; monkey.patch_all() import argparse import importlib parser = argparse.ArgumentParser() parser.add_argument("app", help="dotted name of WSGI app callable [module:callable]") parser.add_argument("-b", "--bind", help="The socket to bind", default=":8080") args = parser.parse_args() module_name, app_name = args.app.split(':') module = importlib.import_module(module_name) app = getattr(module, app_name) bind = args.bind server = WSGIServer(bind, app) server.serve_forever() if __name__ == '__main__': _main()
WSGIServer
python
huggingface__transformers
src/transformers/modeling_outputs.py
{ "start": 16973, "end": 20067 }
class ____(ModelOutput): """ Base class for causal language model (or autoregressive) outputs as well as Mixture of Expert's router hidden states terms, to train a MoE model. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. z_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided): z_loss for the sparse modules. aux_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided): aux_loss for the sparse modules. 
router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. Router logits of the encoder model, useful to compute the auxiliary loss and the z_loss for the sparse modules. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None z_loss: Optional[torch.FloatTensor] = None aux_loss: Optional[torch.FloatTensor] = None router_logits: Optional[tuple[torch.FloatTensor]] = None @dataclass
MoECausalLMOutputWithPast
python
kamyu104__LeetCode-Solutions
Python/check-balanced-string.py
{ "start": 38, "end": 295 }
class ____(object): def isBalanced(self, num): """ :type num: str :rtype: bool """ return sum(ord(num[i])-ord('0') for i in xrange(0, len(num), 2)) == sum(ord(num[i])-ord('0') for i in xrange(1, len(num), 2))
Solution
python
dask__distributed
distributed/dashboard/components/scheduler.py
{ "start": 35417, "end": 39895 }
class ____(DashboardComponent): """Worker network bandwidth chart Plots horizontal bars with the host_net_io.read_bps and host_net_io.write_bps worker state """ @log_errors def __init__(self, scheduler, **kwargs): self.scheduler = scheduler self.source = ColumnDataSource( { "y_read": [], "y_write": [], "x_read": [], "x_write": [], "x_read_disk": [], "x_write_disk": [], } ) self.bandwidth = figure( title="Worker Network Bandwidth", tools="", name="worker_network_bandwidth", **kwargs, ) # host_net_io.read_bps self.bandwidth.hbar( y="y_read", right="x_read", line_color=None, left=0, height=0.5, fill_color="red", legend_label="read", source=self.source, ) # host_net_io.write_bps self.bandwidth.hbar( y="y_write", right="x_write", line_color=None, left=0, height=0.5, fill_color="blue", legend_label="write", source=self.source, ) self.bandwidth.axis[0].ticker = BasicTicker(**TICKS_1024) self.bandwidth.xaxis[0].formatter = NumeralTickFormatter(format="0.0 b") self.bandwidth.xaxis.major_label_orientation = XLABEL_ORIENTATION self.bandwidth.xaxis.minor_tick_line_alpha = 0 self.bandwidth.x_range = Range1d(start=0) self.bandwidth.yaxis.visible = False self.bandwidth.ygrid.visible = False self.bandwidth.toolbar_location = None self.disk = figure( title="Workers Disk", tools="", name="worker_disk", **kwargs, ) # host_disk_io.read_bps self.disk.hbar( y="y_read", right="x_read_disk", line_color=None, left=0, height=0.5, fill_color="red", legend_label="read", source=self.source, ) # host_disk_io.write_bps self.disk.hbar( y="y_write", right="x_write_disk", line_color=None, left=0, height=0.5, fill_color="blue", legend_label="write", source=self.source, ) self.disk.axis[0].ticker = BasicTicker(**TICKS_1024) self.disk.xaxis[0].formatter = NumeralTickFormatter(format="0.0 b") self.disk.xaxis.major_label_orientation = XLABEL_ORIENTATION self.disk.xaxis.minor_tick_line_alpha = 0 self.disk.x_range = Range1d(start=0) self.disk.yaxis.visible = False self.disk.ygrid.visible = False 
self.disk.toolbar_location = None @without_property_validation @log_errors def update(self): workers = self.scheduler.workers.values() h = 0.1 y_read = [i + 0.75 + i * h for i in range(len(workers))] y_write = [i + 0.25 + i * h for i in range(len(workers))] x_read = [] x_write = [] x_read_disk = [] x_write_disk = [] for ws in workers: x_read.append(ws.metrics["host_net_io"]["read_bps"]) x_write.append(ws.metrics["host_net_io"]["write_bps"]) x_read_disk.append(ws.metrics.get("host_disk_io", {}).get("read_bps", 0)) x_write_disk.append(ws.metrics.get("host_disk_io", {}).get("write_bps", 0)) if self.scheduler.workers: self.bandwidth.x_range.end = max( max(x_read), max(x_write), 100_000_000, 0.95 * self.bandwidth.x_range.end, ) self.disk.x_range.end = max( max(x_read_disk), max(x_write_disk), 100_000_000, 0.95 * self.disk.x_range.end, ) else: self.bandwidth.x_range.end = 100_000_000 self.disk.x_range.end = 100_000_000 result = { "y_read": y_read, "y_write": y_write, "x_read": x_read, "x_write": x_write, "x_read_disk": x_read_disk, "x_write_disk": x_write_disk, } update(self.source, result)
WorkerNetworkBandwidth
python
huggingface__transformers
src/transformers/models/patchtst/modeling_patchtst.py
{ "start": 53216, "end": 57784 }
class ____(PatchTSTPreTrainedModel): def __init__(self, config: PatchTSTConfig): super().__init__(config) config.do_mask_input = True self.model = PatchTSTModel(config=config) self.head = PatchTSTMaskPretrainHead(config) # Initialize weights and apply final processing self.post_init() def forward( self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, PatchTSTForPretrainingOutput]: r""" Parameters: past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): Input sequence to the model past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers output_attentions (`bool`, *optional*): Whether or not to return the output attention of all layers return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple. Returns: `PatchTSTForPretrainingOutput` or tuple of `torch.Tensor` (if `return_dict`=False or `config.return_dict`=False) Examples: ```python >>> from huggingface_hub import hf_hub_download >>> import torch >>> from transformers import PatchTSTConfig, PatchTSTForPretraining >>> file = hf_hub_download( ... repo_id="hf-internal-testing/etth1-hourly-batch", filename="train-batch.pt", repo_type="dataset" ... ) >>> batch = torch.load(file) >>> # Config for random mask pretraining >>> config = PatchTSTConfig( ... num_input_channels=7, ... context_length=512, ... patch_length=12, ... stride=12, ... mask_type='random', ... random_mask_ratio=0.4, ... 
use_cls_token=True, ... ) >>> # Config for forecast mask pretraining >>> config = PatchTSTConfig( ... num_input_channels=7, ... context_length=512, ... patch_length=12, ... stride=12, ... mask_type='forecast', ... num_forecast_mask_patches=5, ... use_cls_token=True, ... ) >>> model = PatchTSTForPretraining(config) >>> # during training, one provides both past and future values >>> outputs = model(past_values=batch["past_values"]) >>> loss = outputs.loss >>> loss.backward() ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # past_values: [bs x num_channels x num_patches x d_model] or # [bs x num_channels x (num_patches+1) x d_model] if use cls_token model_output = self.model( past_values=past_values, past_observed_mask=past_observed_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=True, ) # last_hidden_state: [bs x num_channels x num_patches x patch_length] or # [bs x num_channels x (num_patches+1) x patch_length] if use cls_token x_hat = self.head(model_output.last_hidden_state) # calculate masked_loss loss = nn.MSELoss(reduction="none") loss_val = loss(x_hat, model_output.patch_input) masked_loss = (loss_val.mean(dim=-1) * model_output.mask).sum() / (model_output.mask.sum() + 1e-10) encoder_states = model_output.hidden_states if not return_dict: outputs = (x_hat,) + model_output[1:-4] outputs = (masked_loss,) + outputs if masked_loss is not None else outputs return outputs return PatchTSTForPretrainingOutput( loss=masked_loss, prediction_output=x_hat, hidden_states=encoder_states, attentions=model_output.attentions )
PatchTSTForPretraining
python
jupyterlab__jupyterlab
jupyterlab/semver.py
{ "start": 20972, "end": 35793 }
class ____: def __init__(self, range_, loose): self.loose = loose # First, split based on boolean or || self.raw = range_ xs = [self.parse_range(r.strip()) for r in re.split(r"\s*\|\|\s*", range_)] self.set = [r for r in xs if r] if not len(self.set): raise ValueError(f"Invalid SemVer Range: {range_}") self.format() def __repr__(self): return f'<SemVer Range "{self.range}">' def format(self): self.range = "||".join( [" ".join(c.value for c in comps).strip() for comps in self.set] ).strip() logger.debug("Range format %s", self.range) return self.range def __str__(self): return self.range def parse_range(self, range_): loose = self.loose logger.debug("range %s %s", range_, loose) # `1.2.3 - 1.2.4` => `>=1.2.3 <=1.2.4` hr = regexp[HYPHENRANGELOOSE] if loose else regexp[HYPHENRANGE] range_ = hr.sub( hyphen_replace, range_, ) logger.debug("hyphen replace %s", range_) # `> 1.2.3 < 1.2.5` => `>1.2.3 <1.2.5` range_ = regexp[COMPARATORTRIM].sub(comparatorTrimReplace, range_) logger.debug("comparator trim %s, %s", range_, regexp[COMPARATORTRIM]) # `~ 1.2.3` => `~1.2.3` range_ = regexp[TILDETRIM].sub(tildeTrimReplace, range_) # `^ 1.2.3` => `^1.2.3` range_ = regexp[CARETTRIM].sub(caretTrimReplace, range_) # normalize spaces range_ = " ".join(re.split(r"\s+", range_)) # At this point, the range is completely trimmed and # ready to be split into comparators. 
comp_re = regexp[COMPARATORLOOSE] if loose else regexp[COMPARATOR] set_ = re.split( r"\s+", " ".join([parse_comparator(comp, loose) for comp in range_.split(" ")]) ) if self.loose: # in loose mode, throw out any that are not valid comparators set_ = [comp for comp in set_ if comp_re.search(comp)] set_ = [make_comparator(comp, loose) for comp in set_] return set_ def test(self, version): if not version: # xxx return False if isinstance(version, string_type): version = make_semver(version, loose=self.loose) return any(test_set(e, version) for e in self.set) # Mostly just for testing and legacy API reasons def to_comparators(range_, loose): return [ " ".join([c.value for c in comp]).strip().split(" ") for comp in make_range(range_, loose).set ] # comprised of xranges, tildes, stars, and gtlt's at this point. # already replaced the hyphen ranges # turn into a set of JUST comparators. def parse_comparator(comp, loose): logger.debug("comp %s", comp) comp = replace_carets(comp, loose) logger.debug("caret %s", comp) comp = replace_tildes(comp, loose) logger.debug("tildes %s", comp) comp = replace_xranges(comp, loose) logger.debug("xrange %s", comp) comp = replace_stars(comp, loose) logger.debug("stars %s", comp) return comp def is_x(id_): return id_ is None or id_ == "" or id_.lower() == "x" or id_ == "*" # ~, ~> --> * (any, kinda silly) # ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0 <3.0.0 # ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0 <2.1.0 # ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0 <1.3.0 # ~1.2.3, ~>1.2.3 --> >=1.2.3 <1.3.0 # ~1.2.0, ~>1.2.0 --> >=1.2.0 <1.3.0 def replace_tildes(comp, loose): return " ".join([replace_tilde(c, loose) for c in re.split(r"\s+", comp.strip())]) def replace_tilde(comp, loose): r = regexp[TILDELOOSE] if loose else regexp[TILDE] def repl(mob): _ = mob.group(0) M, m, p, pr, _ = mob.groups() logger.debug("tilde %s %s %s %s %s %s", comp, _, M, m, p, pr) if is_x(M): ret = "" elif is_x(m): ret = ">=" + M + ".0.0 <" + str(int(M) + 1) + ".0.0" elif 
is_x(p): # ~1.2 == >=1.2.0 <1.3.0 ret = ">=" + M + "." + m + ".0 <" + M + "." + str(int(m) + 1) + ".0" elif pr: logger.debug("replaceTilde pr %s", pr) if pr[0] != "-": pr = "-" + pr ret = ">=" + M + "." + m + "." + p + pr + " <" + M + "." + str(int(m) + 1) + ".0" else: # ~1.2.3 == >=1.2.3 <1.3.0 ret = ">=" + M + "." + m + "." + p + " <" + M + "." + str(int(m) + 1) + ".0" logger.debug("tilde return, %s", ret) return ret return r.sub(repl, comp) # ^ --> * (any, kinda silly) # ^2, ^2.x, ^2.x.x --> >=2.0.0 <3.0.0 # ^2.0, ^2.0.x --> >=2.0.0 <3.0.0 # ^1.2, ^1.2.x --> >=1.2.0 <2.0.0 # ^1.2.3 --> >=1.2.3 <2.0.0 # ^1.2.0 --> >=1.2.0 <2.0.0 def replace_carets(comp, loose): return " ".join([replace_caret(c, loose) for c in re.split(r"\s+", comp.strip())]) def replace_caret(comp, loose): r = regexp[CARETLOOSE] if loose else regexp[CARET] def repl(mob): # noqa PLR0911 m0 = mob.group(0) M, m, p, pr, _ = mob.groups() logger.debug("caret %s %s %s %s %s %s", comp, m0, M, m, p, pr) if is_x(M): ret = "" elif is_x(m): ret = ">=" + M + ".0.0 <" + str(int(M) + 1) + ".0.0" elif is_x(p): if M == "0": ret = ">=" + M + "." + m + ".0 <" + M + "." + str(int(m) + 1) + ".0" else: ret = ">=" + M + "." + m + ".0 <" + str(int(M) + 1) + ".0.0" elif pr: logger.debug("replaceCaret pr %s", pr) if pr[0] != "-": pr = "-" + pr if M == "0": if m == "0": ret = ( ">=" + M + "." + m + "." + (p or "") + pr + " <" + M + "." + m + "." + str(int(p or 0) + 1) ) else: ret = ( ">=" + M + "." + m + "." + (p or "") + pr + " <" + M + "." + str(int(m) + 1) + ".0" ) else: ret = ">=" + M + "." + m + "." + (p or "") + pr + " <" + str(int(M) + 1) + ".0.0" else: if M == "0": if m == "0": ret = ( ">=" + M + "." + m + "." + (p or "") + " <" + M + "." + m + "." + str(int(p or 0) + 1) ) else: ret = ( ">=" + M + "." + m + "." + (p or "") + " <" + M + "." + str(int(m) + 1) + ".0" ) else: ret = ">=" + M + "." + m + "." 
+ (p or "") + " <" + str(int(M) + 1) + ".0.0" logger.debug("caret return %s", ret) return ret return r.sub(repl, comp) def replace_xranges(comp, loose): logger.debug("replaceXRanges %s %s", comp, loose) return " ".join([replace_xrange(c, loose) for c in re.split(r"\s+", comp.strip())]) def replace_xrange(comp, loose): comp = comp.strip() r = regexp[XRANGELOOSE] if loose else regexp[XRANGE] def repl(mob): # noqa PLR0911 ret = mob.group(0) gtlt, M, m, p, pr, _ = mob.groups() logger.debug("xrange %s %s %s %s %s %s %s", comp, ret, gtlt, M, m, p, pr) xM = is_x(M) xm = xM or is_x(m) xp = xm or is_x(p) any_x = xp if gtlt == "=" and any_x: gtlt = "" logger.debug("xrange gtlt=%s any_x=%s", gtlt, any_x) if xM: if gtlt == ">" or gtlt == "<": # noqa SIM108 # nothing is allowed ret = "<0.0.0" else: ret = "*" elif gtlt and any_x: # replace X with 0, and then append the -0 min-prerelease if xm: m = 0 if xp: p = 0 if gtlt == ">": # >1 => >=2.0.0 # >1.2 => >=1.3.0 # >1.2.3 => >= 1.2.4 gtlt = ">=" if xm: M = int(M) + 1 m = 0 p = 0 elif xp: m = int(m) + 1 p = 0 elif gtlt == "<=": # <=0.7.x is actually <0.8.0, since any 0.7.x should # pass. Similarly, <=7.x is actually <8.0.0, etc. gtlt = "<" if xm: M = int(M) + 1 else: m = int(m) + 1 ret = gtlt + str(M) + "." + str(m) + "." + str(p) elif xm: ret = ">=" + M + ".0.0 <" + str(int(M) + 1) + ".0.0" elif xp: ret = ">=" + M + "." + m + ".0 <" + M + "." + str(int(m) + 1) + ".0" logger.debug("xRange return %s", ret) return ret return r.sub(repl, comp) # Because * is AND-ed with everything else in the comparator, # and '' means "any version", just remove the *s entirely. def replace_stars(comp, loose): logger.debug("replaceStars %s %s", comp, loose) # Looseness is ignored here. star is always as loose as it gets! 
return regexp[STAR].sub("", comp.strip()) # This function is passed to string.replace(re[HYPHENRANGE]) # M, m, patch, prerelease, build # 1.2 - 3.4.5 => >=1.2.0 <=3.4.5 # 1.2.3 - 3.4 => >=1.2.0 <3.5.0 Any 3.4.x will do # 1.2 - 3.4 => >=1.2.0 <3.5.0 def hyphen_replace(mob): from_, fM, fm, fp, fpr, fb, to, tM, tm, tp, tpr, tb = mob.groups() if is_x(fM): from_ = "" elif is_x(fm): from_ = ">=" + fM + ".0.0" elif is_x(fp): from_ = ">=" + fM + "." + fm + ".0" else: from_ = ">=" + from_ if is_x(tM): to = "" elif is_x(tm): to = "<" + str(int(tM) + 1) + ".0.0" elif is_x(tp): to = "<" + tM + "." + str(int(tm) + 1) + ".0" elif tpr: to = "<=" + tM + "." + tm + "." + tp + "-" + tpr else: to = "<=" + to return (from_ + " " + to).strip() def test_set(set_, version): for e in set_: if not e.test(version): return False if len(version.prerelease) > 0: # Find the set of versions that are allowed to have prereleases # For example, ^1.2.3-pr.1 desugars to >=1.2.3-pr.1 <2.0.0 # That should allow `1.2.3-pr.2` to pass. # However, `1.2.4-alpha.notready` should NOT be allowed, # even though it's within the range set by the comparators. for e in set_: if e.semver == ANY: continue if len(e.semver.prerelease) > 0: allowed = e.semver if ( allowed.major == version.major and allowed.minor == version.minor and allowed.patch == version.patch ): return True # Version has a -pre, but it's not one of the ones we like. 
return False return True def satisfies(version, range_, loose=False): try: range_ = make_range(range_, loose) except Exception: return False return range_.test(version) def max_satisfying(versions, range_, loose=False): try: range_ob = make_range(range_, loose=loose) except Exception: return None max_ = None max_sv = None for v in versions: if range_ob.test(v): # noqa # satisfies(v, range_, loose=loose) if max_ is None or max_sv.compare(v) == -1: # compare(max, v, true) max_ = v max_sv = make_semver(max_, loose=loose) return max_ def valid_range(range_, loose): try: # Return '*' instead of '' so that truthiness works. # This will throw if it's invalid anyway return make_range(range_, loose).range or "*" except Exception: return None # Determine if version is less than all the versions possible in the range def ltr(version, range_, loose): return outside(version, range_, "<", loose) # Determine if version is greater than all the versions possible in the range. def rtr(version, range_, loose): return outside(version, range_, ">", loose) def outside(version, range_, hilo, loose): version = make_semver(version, loose) range_ = make_range(range_, loose) if hilo == ">": gtfn = gt ltefn = lte ltfn = lt comp = ">" ecomp = ">=" elif hilo == "<": gtfn = lt ltefn = gte ltfn = gt comp = "<" ecomp = "<=" else: raise ValueError("Must provide a hilo val of '<' or '>'") # If it satisfies the range it is not outside if satisfies(version, range_, loose): return False # From now on, variable terms are as if we're in "gtr" mode. # but note that everything is flipped for the "ltr" function. 
for comparators in range_.set: high = None low = None for comparator in comparators: high = high or comparator low = low or comparator if gtfn(comparator.semver, high.semver, loose): high = comparator elif ltfn(comparator.semver, low.semver, loose): low = comparator # If the edge version comparator has a operator then our version # isn't outside it if high.operator == comp or high.operator == ecomp: return False # If the lowest version comparator has an operator and our version # is less than it then it isn't higher than the range if (not low.operator or low.operator == comp) and ltefn(version, low.semver): # noqa SIM114 return False elif low.operator == ecomp and ltfn(version, low.semver): return False return True
Range
python
getsentry__sentry
src/sentry/replays/lib/event_linking.py
{ "start": 953, "end": 3611 }
class ____(EventLinkPayload): fatal_id: str PayloadUnionType = Union[ EventLinkPayloadDebugId, EventLinkPayloadInfoId, EventLinkPayloadWarningId, EventLinkPayloadErrorId, EventLinkPayloadFatalId, ] def get_level_key( type: str, replay_id: str, event_hash: str, timestamp: int, level: str | None, event_id: str, ) -> PayloadUnionType: if level == "debug": return EventLinkPayloadDebugId( type=type, replay_id=replay_id, event_hash=event_hash, timestamp=timestamp, debug_id=event_id, ) elif level == "info": return EventLinkPayloadInfoId( type=type, replay_id=replay_id, event_hash=event_hash, timestamp=timestamp, info_id=event_id, ) elif level == "warning": return EventLinkPayloadWarningId( type=type, replay_id=replay_id, event_hash=event_hash, timestamp=timestamp, warning_id=event_id, ) elif level == "error": return EventLinkPayloadErrorId( type=type, replay_id=replay_id, event_hash=event_hash, timestamp=timestamp, error_id=event_id, ) elif level == "fatal": return EventLinkPayloadFatalId( type=type, replay_id=replay_id, event_hash=event_hash, timestamp=timestamp, fatal_id=event_id, ) else: # note that this in theory should never happen, but we want to be careful raise ValueError(f"Invalid level {level}") def transform_event_for_linking_payload(replay_id: str, event: BaseEvent) -> EventLinkKafkaMessage: def _make_json_binary_payload() -> PayloadUnionType: level: str | None = event.data.get("level") payload_with_level = get_level_key( type="event_link", replay_id=replay_id, event_hash=_make_event_hash(event.event_id), timestamp=int(event.datetime.timestamp()), level=level, event_id=event.event_id, ) return payload_with_level return { "type": "replay_event", "start_time": int(time.time()), "replay_id": replay_id, "project_id": event.project.id, "segment_id": None, "retention_days": 90, "payload": _make_json_binary_payload(), } def _make_event_hash(event_id: str) -> str: md5_hash = md5(event_id.encode("utf-8")).hexdigest() return str(uuid.UUID(md5_hash))
EventLinkPayloadFatalId
python
fastapi__sqlmodel
docs_src/tutorial/relationship_attributes/cascade_delete_relationships/tutorial005_py310.py
{ "start": 329, "end": 3840 }
class ____(SQLModel, table=True): id: int | None = Field(default=None, primary_key=True) name: str = Field(index=True) secret_name: str age: int | None = Field(default=None, index=True) team_id: int | None = Field( default=None, foreign_key="team.id", ondelete="RESTRICT" ) team: Team | None = Relationship(back_populates="heroes") sqlite_file_name = "database.db" sqlite_url = f"sqlite:///{sqlite_file_name}" engine = create_engine(sqlite_url, echo=True) def create_db_and_tables(): SQLModel.metadata.create_all(engine) with engine.connect() as connection: connection.execute(text("PRAGMA foreign_keys=ON")) # for SQLite only def create_heroes(): with Session(engine) as session: team_preventers = Team(name="Preventers", headquarters="Sharp Tower") team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar") hero_deadpond = Hero( name="Deadpond", secret_name="Dive Wilson", team=team_z_force ) hero_rusty_man = Hero( name="Rusty-Man", secret_name="Tommy Sharp", age=48, team=team_preventers ) hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador") session.add(hero_deadpond) session.add(hero_rusty_man) session.add(hero_spider_boy) session.commit() session.refresh(hero_deadpond) session.refresh(hero_rusty_man) session.refresh(hero_spider_boy) print("Created hero:", hero_deadpond) print("Created hero:", hero_rusty_man) print("Created hero:", hero_spider_boy) hero_spider_boy.team = team_preventers session.add(hero_spider_boy) session.commit() session.refresh(hero_spider_boy) print("Updated hero:", hero_spider_boy) hero_black_lion = Hero(name="Black Lion", secret_name="Trevor Challa", age=35) hero_sure_e = Hero(name="Princess Sure-E", secret_name="Sure-E") team_wakaland = Team( name="Wakaland", headquarters="Wakaland Capital City", heroes=[hero_black_lion, hero_sure_e], ) session.add(team_wakaland) session.commit() session.refresh(team_wakaland) print("Team Wakaland:", team_wakaland) def remove_team_heroes(): with Session(engine) as session: 
statement = select(Team).where(Team.name == "Wakaland") team = session.exec(statement).one() team.heroes.clear() session.add(team) session.commit() session.refresh(team) print("Team with removed heroes:", team) def delete_team(): with Session(engine) as session: statement = select(Team).where(Team.name == "Wakaland") team = session.exec(statement).one() session.delete(team) session.commit() print("Deleted team:", team) def select_deleted_heroes(): with Session(engine) as session: statement = select(Hero).where(Hero.name == "Black Lion") result = session.exec(statement) hero = result.first() print("Black Lion has no team:", hero) statement = select(Hero).where(Hero.name == "Princess Sure-E") result = session.exec(statement) hero = result.first() print("Princess Sure-E has no team:", hero) def main(): create_db_and_tables() create_heroes() remove_team_heroes() delete_team() select_deleted_heroes() if __name__ == "__main__": main()
Hero
python
tartley__colorama
colorama/winterm.py
{ "start": 408, "end": 605 }
class ____: NORMAL = 0x00 # dim text, dim background BRIGHT = 0x08 # bright text, dim background BRIGHT_BACKGROUND = 0x80 # dim text, bright background
WinStyle
python
openai__openai-python
src/openai/types/vector_store_create_params.py
{ "start": 417, "end": 1574 }
class ____(TypedDict, total=False): chunking_strategy: FileChunkingStrategyParam """The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. """ description: str """A description for the vector store. Can be used to describe the vector store's purpose. """ expires_after: ExpiresAfter """The expiration policy for a vector store.""" file_ids: SequenceNotStr[str] """ A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. """ metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. """ name: str """The name of the vector store."""
VectorStoreCreateParams
python
pytorch__pytorch
torch/testing/_internal/distributed/nn/api/remote_module_test.py
{ "start": 1605, "end": 1759 }
class ____(enum.Enum): MODULE_CTOR_WITH_INTERFACE = "module_ctor_with_interface" MODULE_CTOR = "module_ctor" @torch.jit.interface
ModuleCreationMode
python
django__django
tests/tasks/test_custom_backend.py
{ "start": 301, "end": 611 }
class ____(BaseTaskBackend): def __init__(self, alias, params): super().__init__(alias, params) self.prefix = self.options.get("prefix", "") def enqueue(self, *args, **kwargs): logger = logging.getLogger(__name__) logger.info(f"{self.prefix}Task enqueued.")
CustomBackend
python
python-attrs__attrs
src/attr/exceptions.py
{ "start": 1443, "end": 1619 }
class ____(RuntimeError): """ It was attempted to use an *attrs* feature that requires a newer Python version. .. versionadded:: 18.2.0 """
PythonTooOldError
python
falconry__falcon
falcon/_typing.py
{ "start": 8772, "end": 9078 }
class ____(Protocol[_AReqT, _ARespT]): """WSGI/ASGI middleware with resource handler.""" async def process_resource_async( self, req: _AReqT, resp: _ARespT, resource: object, params: Mapping[str, Any], ) -> None: ...
UniversalMiddlewareWithProcessResource
python
spyder-ide__spyder
spyder/app/tests/spyder-boilerplate/spyder_boilerplate/spyder/plugin.py
{ "start": 743, "end": 948 }
class ____(PluginConfigPage): # --- PluginConfigPage API # ------------------------------------------------------------------------ def setup_page(self): pass
SpyderBoilerplateConfigPage
python
paramiko__paramiko
paramiko/ssh_gss.py
{ "start": 21872, "end": 28713 }
class ____(_SSH_GSSAuth): """ Implementation of the Microsoft SSPI Kerberos Authentication for SSH2. :see: `.GSSAuth` """ def __init__(self, auth_method, gss_deleg_creds): """ :param str auth_method: The name of the SSH authentication mechanism (gssapi-with-mic or gss-keyex) :param bool gss_deleg_creds: Delegate client credentials or not """ _SSH_GSSAuth.__init__(self, auth_method, gss_deleg_creds) if self._gss_deleg_creds: self._gss_flags = ( sspicon.ISC_REQ_INTEGRITY | sspicon.ISC_REQ_MUTUAL_AUTH | sspicon.ISC_REQ_DELEGATE ) else: self._gss_flags = ( sspicon.ISC_REQ_INTEGRITY | sspicon.ISC_REQ_MUTUAL_AUTH ) def ssh_init_sec_context( self, target, desired_mech=None, username=None, recv_token=None ): """ Initialize a SSPI context. :param str username: The name of the user who attempts to login :param str target: The FQDN of the target to connect to :param str desired_mech: The negotiated SSPI mechanism ("pseudo negotiated" mechanism, because we support just the krb5 mechanism :-)) :param recv_token: The SSPI token received from the Server :raises: `.SSHException` -- Is raised if the desired mechanism of the client is not supported :return: A ``String`` if the SSPI has returned a token or ``None`` if no token was returned """ from pyasn1.codec.der import decoder self._username = username self._gss_host = target error = 0 targ_name = "host/" + self._gss_host if desired_mech is not None: mech, __ = decoder.decode(desired_mech) if mech.__str__() != self._krb5_mech: raise SSHException("Unsupported mechanism OID.") try: if recv_token is None: self._gss_ctxt = sspi.ClientAuth( "Kerberos", scflags=self._gss_flags, targetspn=targ_name ) error, token = self._gss_ctxt.authorize(recv_token) token = token[0].Buffer except pywintypes.error as e: e.strerror += ", Target: {}".format(self._gss_host) raise if error == 0: """ if the status is GSS_COMPLETE (error = 0) the context is fully established an we can set _gss_ctxt_status to True. 
""" self._gss_ctxt_status = True token = None """ You won't get another token if the context is fully established, so i set token to None instead of "" """ return token def ssh_get_mic(self, session_id, gss_kex=False): """ Create the MIC token for a SSH2 message. :param str session_id: The SSH session ID :param bool gss_kex: Generate the MIC for Key Exchange with SSPI or not :return: gssapi-with-mic: Returns the MIC token from SSPI for the message we created with ``_ssh_build_mic``. gssapi-keyex: Returns the MIC token from SSPI with the SSH session ID as message. """ self._session_id = session_id if not gss_kex: mic_field = self._ssh_build_mic( self._session_id, self._username, self._service, self._auth_method, ) mic_token = self._gss_ctxt.sign(mic_field) else: # for key exchange with gssapi-keyex mic_token = self._gss_srv_ctxt.sign(self._session_id) return mic_token def ssh_accept_sec_context(self, hostname, username, recv_token): """ Accept a SSPI context (server mode). :param str hostname: The servers FQDN :param str username: The name of the user who attempts to login :param str recv_token: The SSPI Token received from the server, if it's not the initial call. :return: A ``String`` if the SSPI has returned a token or ``None`` if no token was returned """ self._gss_host = hostname self._username = username targ_name = "host/" + self._gss_host self._gss_srv_ctxt = sspi.ServerAuth("Kerberos", spn=targ_name) error, token = self._gss_srv_ctxt.authorize(recv_token) token = token[0].Buffer if error == 0: self._gss_srv_ctxt_status = True token = None return token def ssh_check_mic(self, mic_token, session_id, username=None): """ Verify the MIC token for a SSH2 message. 
:param str mic_token: The MIC token received from the client :param str session_id: The SSH session ID :param str username: The name of the user who attempts to login :return: None if the MIC check was successful :raises: ``sspi.error`` -- if the MIC check failed """ self._session_id = session_id self._username = username if username is not None: # server mode mic_field = self._ssh_build_mic( self._session_id, self._username, self._service, self._auth_method, ) # Verifies data and its signature. If verification fails, an # sspi.error will be raised. self._gss_srv_ctxt.verify(mic_field, mic_token) else: # for key exchange with gssapi-keyex # client mode # Verifies data and its signature. If verification fails, an # sspi.error will be raised. self._gss_ctxt.verify(self._session_id, mic_token) @property def credentials_delegated(self): """ Checks if credentials are delegated (server mode). :return: ``True`` if credentials are delegated, otherwise ``False`` """ return self._gss_flags & sspicon.ISC_REQ_DELEGATE and ( self._gss_srv_ctxt_status or self._gss_flags ) def save_client_creds(self, client_token): """ Save the Client token in a file. This is used by the SSH server to store the client credentials if credentials are delegated (server mode). :param str client_token: The SSPI token received form the client :raises: ``NotImplementedError`` -- Credential delegation is currently not supported in server mode """ raise NotImplementedError
_SSH_SSPI
python
nedbat__coveragepy
setup.py
{ "start": 4858, "end": 7494 }
class ____(build_ext): """Build C extensions, but fail with a straightforward exception.""" def run(self): """Wrap `run` with `BuildFailed`.""" try: build_ext.run(self) except errors.PlatformError as exc: raise BuildFailed() from exc def build_extension(self, ext): """Wrap `build_extension` with `BuildFailed`.""" if self.compiler.compiler_type == "msvc": ext.extra_compile_args = (ext.extra_compile_args or []) + [ "/std:c11", "/experimental:c11atomics", ] try: # Uncomment to test compile failure handling: # raise errors.CCompilerError("OOPS") build_ext.build_extension(self, ext) except ext_errors as exc: raise BuildFailed() from exc except ValueError as err: # this can happen on Windows 64 bit, see Python issue 7511 if "'path'" in str(err): # works with both py 2/3 raise BuildFailed() from err raise # There are a few reasons we might not be able to compile the C extension. # Figure out if we should attempt the C extension or not. Define # COVERAGE_DISABLE_EXTENSION in the build environment to explicitly disable the # extension. compile_extension = os.getenv("COVERAGE_DISABLE_EXTENSION", None) is None if "__pypy__" in sys.builtin_module_names: # Pypy can't compile C extensions compile_extension = False if compile_extension: setup_args.update( dict( ext_modules=[ Extension( "coverage.tracer", sources=[ "coverage/ctracer/datastack.c", "coverage/ctracer/filedisp.c", "coverage/ctracer/module.c", "coverage/ctracer/tracer.c", ], ), ], cmdclass={ "build_ext": ve_build_ext, }, ), ) def main(): """Actually invoke setup() with the arguments we built above.""" # For a variety of reasons, it might not be possible to install the C # extension. Try it with, and if it fails, try it without. try: setup(**setup_args) except BuildFailed as exc: msg = "Couldn't install with extension module, trying without it..." exc_msg = f"{exc.__class__.__name__}: {exc.cause}" print(f"**\n** {msg}\n** {exc_msg}\n**") del setup_args["ext_modules"] setup(**setup_args) if __name__ == "__main__": main()
ve_build_ext
python
Lightning-AI__lightning
tests/tests_pytorch/models/test_tpu.py
{ "start": 10083, "end": 11213 }
class ____(BoringModel): def on_train_start(self): assert os.environ.get("PT_XLA_DEBUG") == "1", "PT_XLA_DEBUG was not set in environment variables" def teardown(self, stage): assert "PT_XLA_DEBUG" not in os.environ @RunIf(tpu=True, standalone=True) @mock.patch.dict(os.environ, os.environ.copy(), clear=True) def test_tpu_debug_mode(tmp_path): """Test if debug mode works on TPU.""" trainer_options = { "default_root_dir": tmp_path, "enable_progress_bar": False, "max_epochs": 4, "accelerator": "tpu", "devices": "auto", "limit_train_batches": 0.4, "limit_val_batches": 0.4, "strategy": XLAStrategy(debug=True), } model = AssertXLADebugModel() tpipes.run_model_test(trainer_options, model, with_hpc=False) @RunIf(tpu=True) def test_device_type_when_tpu_strategy_passed(tmp_path): trainer = Trainer(default_root_dir=tmp_path, strategy=XLAStrategy(), accelerator="tpu", devices="auto") assert isinstance(trainer.strategy, XLAStrategy) assert isinstance(trainer.accelerator, XLAAccelerator)
AssertXLADebugModel
python
pytest-dev__pytest
src/_pytest/stash.py
{ "start": 209, "end": 509 }
class ____(Generic[T]): """``StashKey`` is an object used as a key to a :class:`Stash`. A ``StashKey`` is associated with the type ``T`` of the value of the key. A ``StashKey`` is unique and cannot conflict with another key. .. versionadded:: 7.0 """ __slots__ = ()
StashKey