Dataset schema, one record per class with its name masked:

| field | dtype | statistics |
| --- | --- | --- |
| language | string (categorical) | 1 distinct value |
| repo | string (categorical) | 346 distinct values |
| path | string | length 6 to 201 |
| class_span | dict | start/end character offsets |
| source | string | length 21 to 2.38M |
| target | string | length 1 to 96 |
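For orientation, a minimal sketch of loading rows with this schema via the `datasets` library; the dataset id and split below are placeholders, since this dump does not name the dataset.

```python
# Minimal sketch, assuming the schema above; the dataset id and split
# are placeholders, since this dump does not name the dataset.
from datasets import load_dataset

ds = load_dataset("org/masked-class-name-corpus", split="train")  # hypothetical id

row = ds[0]
print(row["language"])                    # "python" (the single language value)
print(row["repo"], row["path"])           # e.g. "huggingface__transformers", "tests/..."
print(row["class_span"])                  # {"start": ..., "end": ...}
print(row["source"][:80], row["target"])  # masked class body and its true name
```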
---

- language: `python`
- repo: `huggingface__transformers`
- path: `tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py`
- class_span: `{ "start": 23010, "end": 25540 }`
- source:

```python
class ____(unittest.TestCase):
    def get_attention(self, multi_query):
        config = GPTBigCodeConfig.from_pretrained(
            "bigcode/gpt_bigcode-santacoder",
            multi_query=multi_query,
            attn_pdrop=0,
            resid_pdrop=0,
        )
        # We need to set it here as it's normally set by the Model's __init__
        config._attn_implementation = "sdpa"
        return GPTBigCodeAttention(config)

    @parameterized.expand([(seed, is_train_mode) for seed in range(5) for is_train_mode in [True, False]])
    def test_mqa_reduces_to_mha(self, seed, is_train_mode=True):
        torch.manual_seed(seed)

        # CREATE MQA AND MHA ATTENTIONS
        attention_mqa = self.get_attention(True)
        attention_mha = self.get_attention(False)

        # ENFORCE MATCHING WEIGHTS
        num_heads = attention_mqa.num_heads
        embed_dim = attention_mqa.embed_dim
        head_dim = attention_mqa.head_dim

        with torch.no_grad():
            mqa_q_weight = attention_mqa.c_attn.weight[:embed_dim, :].view(num_heads, 1, head_dim, embed_dim)
            mqa_kv_weight = attention_mqa.c_attn.weight[embed_dim:, :].view(1, 2, head_dim, embed_dim)
            mha_c_weight = torch.cat(
                [mqa_q_weight, mqa_kv_weight.expand(num_heads, 2, head_dim, embed_dim)], dim=1
            ).view(3 * num_heads * head_dim, embed_dim)
            mqa_q_bias = attention_mqa.c_attn.bias[:embed_dim].view(num_heads, 1, head_dim)
            mqa_kv_bias = attention_mqa.c_attn.bias[embed_dim:].view(1, 2, head_dim)
            mha_c_bias = torch.cat([mqa_q_bias, mqa_kv_bias.expand(num_heads, 2, head_dim)], dim=1).view(
                3 * num_heads * head_dim
            )
            attention_mha.c_attn.weight.copy_(mha_c_weight)
            attention_mha.c_attn.bias.copy_(mha_c_bias)
            attention_mha.c_proj.weight.copy_(attention_mqa.c_proj.weight)
            attention_mha.c_proj.bias.copy_(attention_mqa.c_proj.bias)

        # PUT THE MODEL INTO THE CORRECT MODE
        attention_mha.train(is_train_mode)
        attention_mqa.train(is_train_mode)

        # RUN AN INPUT THROUGH THE MODELS
        num_tokens = 5
        hidden_states = torch.randn(1, num_tokens, embed_dim)
        attention_mha_result = attention_mha(hidden_states)[0]
        attention_mqa_result = attention_mqa(hidden_states)[0]

        # CHECK THAT ALL OUTPUTS ARE THE SAME
        torch.testing.assert_close(attention_mha_result, attention_mqa_result, rtol=1e-5, atol=1e-5)
```

- target: `GPTBigCodeMQATest`
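Each record pairs a class body whose name is masked as `____` with the original name in `target`. A small illustrative helper (the function name `unmask` is ours, not part of the dataset) shows how the two fields relate:

```python
def unmask(row: dict) -> str:
    """Rebuild the original class definition from a dataset row.

    Hypothetical helper, not part of the dataset: `source` masks the class
    name as `____`, and `target` holds the original name, so substituting
    the first occurrence restores the declaration.
    """
    return row["source"].replace("____", row["target"], 1)


# On the record above this yields a source beginning with
# "class GPTBigCodeMQATest(unittest.TestCase):".
# The `class_span` offsets presumably locate the same class as character
# positions in the original repository file.
```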
---

- language: `python`
- repo: `jazzband__django-model-utils`
- path: `tests/models.py`
- class_span: `{ "start": 5974, "end": 6090 }`
- source:

```python
class ____(models.Model):
    content = SplitField()

    class Meta:
        abstract = True
```

- target: `SplitFieldAbstractParent`
---

- language: `python`
- repo: `pallets__werkzeug`
- path: `examples/shorty/application.py`
- class_span: `{ "start": 463, "end": 1535 }`
- source:

```python
class ____:
    def __init__(self, db_uri):
        local.application = self
        self.database_engine = create_engine(db_uri, convert_unicode=True)
        self.dispatch = SharedDataMiddleware(self.dispatch, {"/static": STATIC_PATH})

    def init_database(self):
        metadata.create_all(self.database_engine)

    def dispatch(self, environ, start_response):
        local.application = self
        request = Request(environ)
        local.url_adapter = adapter = url_map.bind_to_environ(environ)
        try:
            endpoint, values = adapter.match()
            handler = getattr(views, endpoint)
            response = handler(request, **values)
        except NotFound:
            response = views.not_found(request)
            response.status_code = 404
        except HTTPException as e:
            response = e
        return ClosingIterator(
            response(environ, start_response), [session.remove, local_manager.cleanup]
        )

    def __call__(self, environ, start_response):
        return self.dispatch(environ, start_response)
```

- target: `Shorty`
---

- language: `python`
- repo: `pypa__pip`
- path: `src/pip/_vendor/urllib3/exceptions.py`
- class_span: `{ "start": 5582, "end": 5783 }`
- source:

```python
class ____(HTTPError):
    """
    Body should be :class:`http.client.HTTPResponse` like
    (have an fp attribute which returns raw chunks) for read_chunked().
    """

    pass
```

- target: `BodyNotHttplibCompatible`
---

- language: `python`
- repo: `getsentry__sentry`
- path: `src/sentry/preprod/api/models/launchpad.py`
- class_span: `{ "start": 710, "end": 1169 }`
- source:

```python
class ____(BaseModel):
    model_config = ConfigDict()

    state: Literal[PreprodArtifactSizeMetrics.SizeAnalysisState.PENDING] = (
        PreprodArtifactSizeMetrics.SizeAnalysisState.PENDING
    )


# Missing SizeAnalysisState.COMPLETED is on purpose. The only way to mark
# a size metrics as successful is via the assemble endpoint.
PutSize = Annotated[
    PutSizeFailed | PutSizePending | PutSizeProcessing,
    Field(discriminator="state"),
]
```

- target: `PutSizePending`
---

- language: `python`
- repo: `sqlalchemy__sqlalchemy`
- path: `test/orm/inheritance/test_polymorphic_rel.py`
- class_span: `{ "start": 79454, "end": 80166 }`
- source:

```python
class ____(
    _PolymorphicTestBase, _PolymorphicAliasedJoins
):
    @testing.skip_if(
        lambda: True, "join condition doesn't work w/ this mapping"
    )
    def test_lazyload_related_w_cache_check(self):
        pass

    def test_with_polymorphic_two_future_default_wp(self):
        """test #7262

        compare to test_with_polymorphic_two_future_adhoc_wp
        """
        sess = fixture_session()

        def go():
            wp = with_polymorphic(Person, "*")
            eq_(
                sess.query(wp).order_by(wp.person_id).all(),
                self._emps_wo_relationships_fixture(),
            )

        self.assert_sql_count(testing.db, go, 2)
```

- target: `PolymorphicAliasedJoinsTest`
---

- language: `python`
- repo: `plotly__plotly.py`
- path: `plotly/graph_objs/scattercarpet/selected/_marker.py`
- class_span: `{ "start": 233, "end": 3615 }`
- source:

```python
class ____(_BaseTraceHierarchyType):
    _parent_path_str = "scattercarpet.selected"
    _path_str = "scattercarpet.selected.marker"
    _valid_props = {"color", "opacity", "size"}

    @property
    def color(self):
        """
        Sets the marker color of selected points.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color: see https://plotly.com/python/css-colors/ for a list

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    @property
    def opacity(self):
        """
        Sets the marker opacity of selected points.

        The 'opacity' property is a number and may be specified as:
          - An int or float in the interval [0, 1]

        Returns
        -------
        int|float
        """
        return self["opacity"]

    @opacity.setter
    def opacity(self, val):
        self["opacity"] = val

    @property
    def size(self):
        """
        Sets the marker size of selected points.

        The 'size' property is a number and may be specified as:
          - An int or float in the interval [0, inf]

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the marker color of selected points.
        opacity
            Sets the marker opacity of selected points.
        size
            Sets the marker size of selected points.
        """

    def __init__(self, arg=None, color=None, opacity=None, size=None, **kwargs):
        """
        Construct a new Marker object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scattercarpet.
            selected.Marker`
        color
            Sets the marker color of selected points.
        opacity
            Sets the marker opacity of selected points.
        size
            Sets the marker size of selected points.

        Returns
        -------
        Marker
        """
        super().__init__("marker")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.scattercarpet.selected.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.selected.Marker`""")

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        self._set_property("color", arg, color)
        self._set_property("opacity", arg, opacity)
        self._set_property("size", arg, size)
        self._process_kwargs(**dict(arg, **kwargs))
        self._skip_invalid = False
```

- target: `Marker`
---

- language: `python`
- repo: `getsentry__sentry`
- path: `src/sentry/grouping/component.py`
- class_span: `{ "start": 10423, "end": 10526 }`
- source:

```python
class ____(BaseGroupingComponent[str]):
    id: str = "uri"


# Top-level components
```

- target: `URIGroupingComponent`
---

- language: `python`
- repo: `python__mypy`
- path: `mypy/nodes.py`
- class_span: `{ "start": 55190, "end": 55753 }`
- source:

```python
class ____(Statement):
    """Operator assignment statement such as x += 1"""

    __slots__ = ("op", "lvalue", "rvalue")

    __match_args__ = ("lvalue", "op", "rvalue")

    op: str  # TODO: Enum?
    lvalue: Lvalue
    rvalue: Expression

    def __init__(self, op: str, lvalue: Lvalue, rvalue: Expression) -> None:
        super().__init__()
        self.op = op
        self.lvalue = lvalue
        self.rvalue = rvalue

    def accept(self, visitor: StatementVisitor[T]) -> T:
        return visitor.visit_operator_assignment_stmt(self)
```

- target: `OperatorAssignmentStmt`
---

- language: `python`
- repo: `anthropics__anthropic-sdk-python`
- path: `src/anthropic/lib/streaming/_beta_types.py`
- class_span: `{ "start": 1480, "end": 1772 }`
- source:

```python
class ____(BaseModel):
    type: Literal["input_json"]

    partial_json: str
    """A partial JSON string delta

    e.g. `'"San Francisco,'`
    """

    snapshot: object
    """The currently accumulated parsed object.

    e.g. `{'location': 'San Francisco, CA'}`
    """
```

- target: `BetaInputJsonEvent`
---

- language: `python`
- repo: `sqlalchemy__sqlalchemy`
- path: `test/base/test_result.py`
- class_span: `{ "start": 30886, "end": 38658 }`
- source:

```python
class ____(fixtures.TestBase):
    """the chunkediterator supports "non tuple mode", where we bypass
    the expense of generating rows when we have only scalar values.

    """

    @testing.fixture
    def no_tuple_fixture(self):
        data = [(1, 1, 1), (2, 1, 2), (1, 1, 1), (1, 3, 2), (4, 1, 2)]

        def chunks(num):
            while data:
                rows = data[0:num]
                data[:] = []
                yield [row[0] for row in rows]

        return chunks

    @testing.fixture
    def no_tuple_one_fixture(self):
        data = [(1, 1, 1)]

        def chunks(num):
            while data:
                rows = data[0:num]
                data[:] = []
                yield [row[0] for row in rows]

        return chunks

    @testing.fixture
    def normal_fixture(self):
        data = [(1, 1, 1), (2, 1, 2), (1, 1, 1), (1, 3, 2), (4, 1, 2)]

        def chunks(num):
            while data:
                rows = data[0:num]
                data[:] = []
                yield [row[0] for row in rows]

        return chunks

    def test_scalar_mode_columns0_mapping(self, no_tuple_fixture):
        metadata = result.SimpleResultMetaData(["a", "b", "c"])
        r = result.ChunkedIteratorResult(
            metadata, no_tuple_fixture, source_supports_scalars=True
        )
        r = r.columns(0).mappings()
        eq_(
            list(r),
            [{"a": 1}, {"a": 2}, {"a": 1}, {"a": 1}, {"a": 4}],
        )

    def test_scalar_mode_columns0_plain(self, no_tuple_fixture):
        """test #7953"""
        metadata = result.SimpleResultMetaData(["a", "b", "c"])
        r = result.ChunkedIteratorResult(
            metadata, no_tuple_fixture, source_supports_scalars=True
        )
        r = r.columns(0)
        eq_(
            list(r),
            [(1,), (2,), (1,), (1,), (4,)],
        )

    def test_scalar_mode_scalars0(self, no_tuple_fixture):
        metadata = result.SimpleResultMetaData(["a", "b", "c"])
        r = result.ChunkedIteratorResult(
            metadata, no_tuple_fixture, source_supports_scalars=True
        )
        r = r.scalars(0)
        eq_(
            list(r),
            [1, 2, 1, 1, 4],
        )

    def test_scalar_mode_but_accessed_nonscalar_result(self, no_tuple_fixture):
        metadata = result.SimpleResultMetaData(["a", "b", "c"])
        r = result.ChunkedIteratorResult(
            metadata, no_tuple_fixture, source_supports_scalars=True
        )
        s1 = r.scalars()
        eq_(r.fetchone(), (1,))
        eq_(s1.all(), [2, 1, 1, 4])

    def test_scalar_mode_scalars_all(self, no_tuple_fixture):
        metadata = result.SimpleResultMetaData(["a", "b", "c"])
        r = result.ChunkedIteratorResult(
            metadata, no_tuple_fixture, source_supports_scalars=True
        )
        r = r.scalars()
        eq_(r.all(), [1, 2, 1, 1, 4])

    def test_scalar_mode_mfiltered_unique_rows_all(self, no_tuple_fixture):
        metadata = result.SimpleResultMetaData(
            ["a", "b", "c"], _unique_filters=[int]
        )
        r = result.ChunkedIteratorResult(
            metadata,
            no_tuple_fixture,
            source_supports_scalars=True,
        )
        r = r.unique()
        eq_(r.all(), [(1,), (2,), (4,)])

    @testing.combinations(
        lambda r: r.scalar(),
        lambda r: r.scalar_one(),
        lambda r: r.scalar_one_or_none(),
        argnames="get",
    )
    def test_unique_scalar_accessors(self, no_tuple_one_fixture, get):
        metadata = result.SimpleResultMetaData(
            ["a", "b", "c"], _unique_filters=[int]
        )
        r = result.ChunkedIteratorResult(
            metadata,
            no_tuple_one_fixture,
            source_supports_scalars=True,
        )
        r = r.unique()
        eq_(get(r), 1)

    def test_scalar_mode_mfiltered_unique_mappings_all(self, no_tuple_fixture):
        metadata = result.SimpleResultMetaData(
            ["a", "b", "c"], _unique_filters=[int]
        )
        r = result.ChunkedIteratorResult(
            metadata,
            no_tuple_fixture,
            source_supports_scalars=True,
        )
        r = r.unique()
        eq_(r.mappings().all(), [{"a": 1}, {"a": 2}, {"a": 4}])

    def test_scalar_mode_mfiltered_unique_scalars_all(self, no_tuple_fixture):
        metadata = result.SimpleResultMetaData(
            ["a", "b", "c"], _unique_filters=[int]
        )
        r = result.ChunkedIteratorResult(
            metadata,
            no_tuple_fixture,
            source_supports_scalars=True,
        )
        r = r.scalars().unique()
        eq_(r.all(), [1, 2, 4])

    def test_scalar_mode_unique_scalars_all(self, no_tuple_fixture):
        metadata = result.SimpleResultMetaData(["a", "b", "c"])
        r = result.ChunkedIteratorResult(
            metadata, no_tuple_fixture, source_supports_scalars=True
        )
        r = r.unique().scalars()
        eq_(r.all(), [1, 2, 4])

    def test_scalar_mode_scalars_fetchmany(self, normal_fixture):
        metadata = result.SimpleResultMetaData(["a", "b", "c"])
        r = result.ChunkedIteratorResult(
            metadata, normal_fixture, source_supports_scalars=True
        )
        r = r.scalars()
        eq_(list(r.partitions(2)), [[1, 2], [1, 1], [4]])

    def test_scalar_mode_unique_scalars_fetchmany(self, normal_fixture):
        metadata = result.SimpleResultMetaData(["a", "b", "c"])
        r = result.ChunkedIteratorResult(
            metadata, normal_fixture, source_supports_scalars=True
        )
        r = r.scalars().unique()
        eq_(list(r.partitions(2)), [[1, 2], [4]])

    def test_scalar_mode_unique_tuples_all(self, normal_fixture):
        metadata = result.SimpleResultMetaData(["a", "b", "c"])
        r = result.ChunkedIteratorResult(
            metadata, normal_fixture, source_supports_scalars=True
        )
        r = r.unique()
        eq_(r.all(), [(1,), (2,), (4,)])

    def test_scalar_mode_tuples_all(self, normal_fixture):
        metadata = result.SimpleResultMetaData(["a", "b", "c"])
        r = result.ChunkedIteratorResult(
            metadata, normal_fixture, source_supports_scalars=True
        )
        eq_(r.all(), [(1,), (2,), (1,), (1,), (4,)])

    def test_scalar_mode_scalars_iterate(self, no_tuple_fixture):
        metadata = result.SimpleResultMetaData(["a", "b", "c"])
        r = result.ChunkedIteratorResult(
            metadata, no_tuple_fixture, source_supports_scalars=True
        )
        r = r.scalars()
        eq_(list(r), [1, 2, 1, 1, 4])

    def test_scalar_mode_tuples_iterate(self, normal_fixture):
        metadata = result.SimpleResultMetaData(["a", "b", "c"])
        r = result.ChunkedIteratorResult(
            metadata, normal_fixture, source_supports_scalars=True
        )
        eq_(list(r), [(1,), (2,), (1,), (1,), (4,)])

    @testing.combinations(
        lambda r: r.one(),
        lambda r: r.first(),
        lambda r: r.one_or_none(),
        argnames="get",
    )
    def test_scalar_mode_first(self, no_tuple_one_fixture, get):
        metadata = result.SimpleResultMetaData(["a", "b", "c"])
        r = result.ChunkedIteratorResult(
            metadata, no_tuple_one_fixture, source_supports_scalars=True
        )
        eq_(get(r), (1,))

    @testing.combinations(
        lambda r: r.scalar(),
        lambda r: r.scalar_one(),
        lambda r: r.scalar_one_or_none(),
        argnames="get",
    )
    def test_scalar_mode_scalar_one(self, no_tuple_one_fixture, get):
        metadata = result.SimpleResultMetaData(["a", "b", "c"])
        r = result.ChunkedIteratorResult(
            metadata, no_tuple_one_fixture, source_supports_scalars=True
        )
        eq_(get(r), 1)
```

- target: `OnlyScalarsTest`
---

- language: `python`
- repo: `coleifer__peewee`
- path: `tests/regressions.py`
- class_span: `{ "start": 55662, "end": 58771 }`
- source:

```python
class ____(ModelTestCase):
    requires = [DF, DFC, DFGC]

    def test_django_filter_regression(self):
        a, b, c = [DF.create(name=n, value=i) for i, n in enumerate('abc')]
        ca1 = DFC.create(df=a, name='a1', value=11)
        ca2 = DFC.create(df=a, name='a2', value=12)
        cb1 = DFC.create(df=b, name='b1', value=21)
        gca1_1 = DFGC.create(dfc=ca1, name='a1-1', value=101)
        gca1_2 = DFGC.create(dfc=ca1, name='a1-2', value=101)
        gca2_1 = DFGC.create(dfc=ca2, name='a2-1', value=111)

        def assertNames(q, expected):
            self.assertEqual(sorted([n.name for n in q]), expected)

        assertNames(DF.filter(name='a'), ['a'])
        assertNames(DF.filter(name='a', id=a.id), ['a'])
        assertNames(DF.filter(name__in=['a', 'c']), ['a', 'c'])
        assertNames(DF.filter(name__in=['a', 'c'], id=a.id), ['a'])
        assertNames(DF.filter(dfc_set__name='a1'), ['a'])
        assertNames(DF.filter(dfc_set__name__in=['a1', 'b1']), ['a', 'b'])
        assertNames(DF.filter(DQ(dfc_set__name='a1') | DQ(dfc_set__name='b1')), ['a', 'b'])
        assertNames(DF.filter(dfc_set__dfgc_set__name='a1-1'), ['a'])
        assertNames(DF.filter(
            DQ(dfc_set__dfgc_set__name='a1-1') |
            DQ(dfc_set__dfgc_set__name__in=['x', 'y'])), ['a'])

        assertNames(DFC.filter(df__name='a'), ['a1', 'a2'])
        assertNames(DFC.filter(df__name='a', value=11), ['a1'])
        assertNames(DFC.filter(DQ(df__name='a') | DQ(df__name='b')), ['a1', 'a2', 'b1'])
        assertNames(DFC.filter(
            DQ(df__name='a') | DQ(dfgc_set__name='a1-1')).distinct(), ['a1', 'a2'])

        assertNames(DFGC.filter(dfc__df__name='a'), ['a1-1', 'a1-2', 'a2-1'])
        assertNames(DFGC.filter(dfc__df__name='a', dfc__name='a2'), ['a2-1'])
        assertNames(DFGC.filter(
            DQ(dfc__df__value__lte=0) |
            DQ(dfc__df__name='a', dfc__name='a1') |
            DQ(dfc__name='a2')), ['a1-1', 'a1-2', 'a2-1'])
        assertNames(
            (DFGC.filter(DQ(dfc__df__value__lte=10) | DQ(dfc__value__lte=101))
             .filter(DQ(name__ilike='a1%') | DQ(dfc__value=101))),
            ['a1-1', 'a1-2'])

        assertNames(DFGC.filter(dfc__df=a), ['a1-1', 'a1-2', 'a2-1'])
        assertNames(DFGC.filter(dfc__df=a.id), ['a1-1', 'a1-2', 'a2-1'])

        q = DFC.select().join(DF)
        assertNames(q.filter(df=a), ['a1', 'a2'])
        assertNames(q.filter(df__name='a'), ['a1', 'a2'])

        DFA = DF.alias()
        DFCA = DFC.alias()
        DFGCA = DFGC.alias()

        q = DFCA.select().join(DFA)
        assertNames(q.filter(df=a), ['a1', 'a2'])
        assertNames(q.filter(df__name='a'), ['a1', 'a2'])

        q = DFGC.select().join(DFC).join(DF)
        assertNames(q.filter(dfc__df=a), ['a1-1', 'a1-2', 'a2-1'])

        q = DFGCA.select().join(DFCA).join(DFA)
        assertNames(q.filter(dfc__df=a), ['a1-1', 'a1-2', 'a2-1'])

        q = DF.select().join(DFC).join(DFGC)
        assertNames(q.filter(dfc_set__dfgc_set__name='a1-1'), ['a'])
```

- target: `TestDjangoFilterRegression`
---

- language: `python`
- repo: `spack__spack`
- path: `lib/spack/spack/vendor/pyrsistent/_checked_types.py`
- class_span: `{ "start": 738, "end": 4941 }`
- source:

```python
class ____(Exception):
    """
    Exception raised from a :py:class:`CheckedType` when invariant tests fail or when a mandatory
    field is missing.

    Contains two fields of interest:
    invariant_errors, a tuple of error data for the failing invariants
    missing_fields, a tuple of strings specifying the missing names
    """

    def __init__(self, error_codes=(), missing_fields=(), *args, **kwargs):
        self.invariant_errors = tuple(e() if callable(e) else e for e in error_codes)
        self.missing_fields = missing_fields
        super(InvariantException, self).__init__(*args, **kwargs)

    def __str__(self):
        return super(InvariantException, self).__str__() + \
            ", invariant_errors=[{invariant_errors}], missing_fields=[{missing_fields}]".format(
                invariant_errors=', '.join(str(e) for e in self.invariant_errors),
                missing_fields=', '.join(self.missing_fields))


_preserved_iterable_types = (
    Enum,
)
"""Some types are themselves iterable, but we want to use the type itself and
not its members for the type specification. This defines a set of such types
that we explicitly preserve.

Note that strings are not such types because the string inputs we pass in are
values, not types.
"""


def maybe_parse_user_type(t):
    """Try to coerce a user-supplied type directive into a list of types.

    This function should be used in all places where a user specifies a type,
    for consistency.

    The policy for what defines valid user input should be clear from the implementation.
    """
    is_type = isinstance(t, type)
    is_preserved = isinstance(t, type) and issubclass(t, _preserved_iterable_types)
    is_string = isinstance(t, str)
    is_iterable = isinstance(t, Iterable)

    if is_preserved:
        return [t]
    elif is_string:
        return [t]
    elif is_type and not is_iterable:
        return [t]
    elif is_iterable:
        # Recur to validate contained types as well.
        ts = t
        return tuple(e for t in ts for e in maybe_parse_user_type(t))
    else:
        # If this raises because `t` cannot be formatted, so be it.
        raise TypeError(
            'Type specifications must be types or strings. Input: {}'.format(t)
        )


def maybe_parse_many_user_types(ts):
    # Just a different name to communicate that you're parsing multiple user
    # inputs. `maybe_parse_user_type` handles the iterable case anyway.
    return maybe_parse_user_type(ts)


def _store_types(dct, bases, destination_name, source_name):
    maybe_types = maybe_parse_many_user_types([
        d[source_name]
        for d in ([dct] + [b.__dict__ for b in bases]) if source_name in d
    ])

    dct[destination_name] = maybe_types


def _merge_invariant_results(result):
    verdict = True
    data = []
    for verd, dat in result:
        if not verd:
            verdict = False
            data.append(dat)

    return verdict, tuple(data)


def wrap_invariant(invariant):
    # Invariant functions may return the outcome of several tests
    # In those cases the results have to be merged before being passed
    # back to the client.
    def f(*args, **kwargs):
        result = invariant(*args, **kwargs)
        if isinstance(result[0], bool):
            return result

        return _merge_invariant_results(result)

    return f


def _all_dicts(bases, seen=None):
    """
    Yield each class in ``bases`` and each of their base classes.
    """
    if seen is None:
        seen = set()
    for cls in bases:
        if cls in seen:
            continue
        seen.add(cls)
        yield cls.__dict__
        for b in _all_dicts(cls.__bases__, seen):
            yield b


def store_invariants(dct, bases, destination_name, source_name):
    # Invariants are inherited
    invariants = []
    for ns in [dct] + list(_all_dicts(bases)):
        try:
            invariant = ns[source_name]
        except KeyError:
            continue
        invariants.append(invariant)

    if not all(callable(invariant) for invariant in invariants):
        raise TypeError('Invariants must be callable')
    dct[destination_name] = tuple(wrap_invariant(inv) for inv in invariants)
```

- target: `InvariantException`
---

- language: `python`
- repo: `kamyu104__LeetCode-Solutions`
- path: `Python/sequential-grid-path-cover.py`
- class_span: `{ "start": 64, "end": 1170 }`
- source:

```python
class ____(object):
    def findPath(self, grid, k):
        """
        :type grid: List[List[int]]
        :type k: int
        :rtype: List[List[int]]
        """
        DIRECTIONS = ((1, 0), (0, 1), (-1, 0), (0, -1))

        def backtracking(i, j, curr):
            v = grid[i][j]
            if v and v != curr:
                return False
            grid[i][j] = -1
            result.append([i, j])
            if len(result) == len(grid)*len(grid[0]):
                return True
            new_curr = curr+1 if v == curr else curr
            for di, dj in DIRECTIONS:
                ni, nj = i+di, j+dj
                if not (0 <= ni < len(grid) and 0 <= nj < len(grid[0]) and grid[ni][nj] != -1):
                    continue
                if backtracking(ni, nj, new_curr):
                    return True
            result.pop()
            grid[i][j] = v
            return False

        result = []
        for i in xrange(len(grid)):
            for j in xrange(len(grid[0])):
                if backtracking(i, j, 1):
                    return result
        return result
```

- target: `Solution`
---

- language: `python`
- repo: `plotly__plotly.py`
- path: `plotly/graph_objs/streamtube/_legendgrouptitle.py`
- class_span: `{ "start": 233, "end": 2960 }`
- source:

```python
class ____(_BaseTraceHierarchyType):
    _parent_path_str = "streamtube"
    _path_str = "streamtube.legendgrouptitle"
    _valid_props = {"font", "text"}

    @property
    def font(self):
        """
        Sets this legend group's title font.

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.streamtube.legendgrouptitle.Font`
          - A dict of string/value properties that will be passed
            to the Font constructor

        Returns
        -------
        plotly.graph_objs.streamtube.legendgrouptitle.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    @property
    def text(self):
        """
        Sets the title of the legend group.

        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    @property
    def _prop_descriptions(self):
        return """\
        font
            Sets this legend group's title font.
        text
            Sets the title of the legend group.
        """

    def __init__(self, arg=None, font=None, text=None, **kwargs):
        """
        Construct a new Legendgrouptitle object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.streamtube.Legendgrouptitle`
        font
            Sets this legend group's title font.
        text
            Sets the title of the legend group.

        Returns
        -------
        Legendgrouptitle
        """
        super().__init__("legendgrouptitle")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.streamtube.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.streamtube.Legendgrouptitle`""")

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        self._set_property("font", arg, font)
        self._set_property("text", arg, text)
        self._process_kwargs(**dict(arg, **kwargs))
        self._skip_invalid = False
```

- target: `Legendgrouptitle`
---

- language: `python`
- repo: `jmcnamara__XlsxWriter`
- path: `xlsxwriter/test/worksheet/test_sparkline11.py`
- class_span: `{ "start": 345, "end": 8987 }`
- source:

```python
class ____(unittest.TestCase):
    """
    Test assembling a complete Worksheet file.

    """

    def test_assemble_xml_file(self):
        """Test writing a worksheet with no cell data."""
        self.maxDiff = None

        fh = StringIO()
        worksheet = Worksheet()
        worksheet._set_filehandle(fh)
        worksheet.select()
        worksheet.name = "Sheet1"
        worksheet.excel_version = 2010

        data = [-2, 2, 3, -1, 0]
        worksheet.write_row("A1", data)
        worksheet.write_row("A2", data)
        worksheet.write_row("A3", data)
        worksheet.write_row("A4", [1, 2, 3, 4, 5])

        # Set up sparklines.
        worksheet.add_sparkline(
            "F1",
            {
                "range": "A1:E1",
                "max": 0.5,
                "min": -0.5,
                "axis": True,
                "reverse": True,
                "empty_cells": "zero",
                "weight": 0.25,
                "high_point": True,
                "low_point": True,
                "negative_points": True,
                "first_point": True,
                "last_point": True,
                "markers": True,
            },
        )

        worksheet.add_sparkline(
            "F2",
            {
                "range": "A2:E2",
                "max": "group",
                "min": "group",
                "empty_cells": "connect",
                "weight": 2.25,
            },
        )

        worksheet.add_sparkline(
            "F3",
            {
                "range": "A3:E3",
                "max": "group",
                "min": "0",
                "show_hidden": True,
                "weight": 6,
                "date_axis": "A4:E4",
            },
        )

        worksheet._assemble_xml_file()

        exp = _xml_to_list(
            """
            <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
            <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
              <dimension ref="A1:E4"/>
              <sheetViews>
                <sheetView tabSelected="1" workbookViewId="0"/>
              </sheetViews>
              <sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
              <sheetData>
                <row r="1" spans="1:5" x14ac:dyDescent="0.25">
                  <c r="A1"><v>-2</v></c>
                  <c r="B1"><v>2</v></c>
                  <c r="C1"><v>3</v></c>
                  <c r="D1"><v>-1</v></c>
                  <c r="E1"><v>0</v></c>
                </row>
                <row r="2" spans="1:5" x14ac:dyDescent="0.25">
                  <c r="A2"><v>-2</v></c>
                  <c r="B2"><v>2</v></c>
                  <c r="C2"><v>3</v></c>
                  <c r="D2"><v>-1</v></c>
                  <c r="E2"><v>0</v></c>
                </row>
                <row r="3" spans="1:5" x14ac:dyDescent="0.25">
                  <c r="A3"><v>-2</v></c>
                  <c r="B3"><v>2</v></c>
                  <c r="C3"><v>3</v></c>
                  <c r="D3"><v>-1</v></c>
                  <c r="E3"><v>0</v></c>
                </row>
                <row r="4" spans="1:5" x14ac:dyDescent="0.25">
                  <c r="A4"><v>1</v></c>
                  <c r="B4"><v>2</v></c>
                  <c r="C4"><v>3</v></c>
                  <c r="D4"><v>4</v></c>
                  <c r="E4"><v>5</v></c>
                </row>
              </sheetData>
              <pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
              <extLst>
                <ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{05C60535-1F16-4fd2-B633-F4F36F0B64E0}">
                  <x14:sparklineGroups xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
                    <x14:sparklineGroup manualMin="0" lineWeight="6" dateAxis="1" displayEmptyCellsAs="gap" displayHidden="1" minAxisType="custom" maxAxisType="group">
                      <x14:colorSeries theme="4" tint="-0.499984740745262"/>
                      <x14:colorNegative theme="5"/>
                      <x14:colorAxis rgb="FF000000"/>
                      <x14:colorMarkers theme="4" tint="-0.499984740745262"/>
                      <x14:colorFirst theme="4" tint="0.39997558519241921"/>
                      <x14:colorLast theme="4" tint="0.39997558519241921"/>
                      <x14:colorHigh theme="4"/>
                      <x14:colorLow theme="4"/>
                      <xm:f>Sheet1!A4:E4</xm:f>
                      <x14:sparklines>
                        <x14:sparkline>
                          <xm:f>Sheet1!A3:E3</xm:f>
                          <xm:sqref>F3</xm:sqref>
                        </x14:sparkline>
                      </x14:sparklines>
                    </x14:sparklineGroup>
                    <x14:sparklineGroup lineWeight="2.25" displayEmptyCellsAs="span" minAxisType="group" maxAxisType="group">
                      <x14:colorSeries theme="4" tint="-0.499984740745262"/>
                      <x14:colorNegative theme="5"/>
                      <x14:colorAxis rgb="FF000000"/>
                      <x14:colorMarkers theme="4" tint="-0.499984740745262"/>
                      <x14:colorFirst theme="4" tint="0.39997558519241921"/>
                      <x14:colorLast theme="4" tint="0.39997558519241921"/>
                      <x14:colorHigh theme="4"/>
                      <x14:colorLow theme="4"/>
                      <x14:sparklines>
                        <x14:sparkline>
                          <xm:f>Sheet1!A2:E2</xm:f>
                          <xm:sqref>F2</xm:sqref>
                        </x14:sparkline>
                      </x14:sparklines>
                    </x14:sparklineGroup>
                    <x14:sparklineGroup manualMax="0.5" manualMin="-0.5" lineWeight="0.25" markers="1" high="1" low="1" first="1" last="1" negative="1" displayXAxis="1" minAxisType="custom" maxAxisType="custom" rightToLeft="1">
                      <x14:colorSeries theme="4" tint="-0.499984740745262"/>
                      <x14:colorNegative theme="5"/>
                      <x14:colorAxis rgb="FF000000"/>
                      <x14:colorMarkers theme="4" tint="-0.499984740745262"/>
                      <x14:colorFirst theme="4" tint="0.39997558519241921"/>
                      <x14:colorLast theme="4" tint="0.39997558519241921"/>
                      <x14:colorHigh theme="4"/>
                      <x14:colorLow theme="4"/>
                      <x14:sparklines>
                        <x14:sparkline>
                          <xm:f>Sheet1!A1:E1</xm:f>
                          <xm:sqref>F1</xm:sqref>
                        </x14:sparkline>
                      </x14:sparklines>
                    </x14:sparklineGroup>
                  </x14:sparklineGroups>
                </ext>
              </extLst>
            </worksheet>
            """
        )

        got = _xml_to_list(fh.getvalue())

        self.assertEqual(exp, got)
```

- target: `TestAssembleWorksheet`
---

- language: `python`
- repo: `apache__airflow`
- path: `airflow-core/src/airflow/models/dagwarning.py`
- class_span: `{ "start": 1421, "end": 3567 }`
- source:

```python
class ____(Base):
    """
    A table to store DAG warnings.

    DAG warnings are problems that don't rise to the level of failing the DAG parse
    but which users should nonetheless be warned about. These warnings are recorded
    when parsing DAG and displayed on the Webserver in a flash message.
    """

    dag_id: Mapped[str] = mapped_column(StringID(), primary_key=True)
    warning_type: Mapped[str] = mapped_column(String(50), primary_key=True)
    message: Mapped[str] = mapped_column(Text, nullable=False)
    timestamp: Mapped[datetime] = mapped_column(UtcDateTime, nullable=False, default=timezone.utcnow)

    dag_model = relationship("DagModel", viewonly=True, lazy="selectin")

    __tablename__ = "dag_warning"
    __table_args__ = (
        ForeignKeyConstraint(
            ("dag_id",),
            ["dag.dag_id"],
            name="dcw_dag_id_fkey",
            ondelete="CASCADE",
        ),
        Index("idx_dag_warning_dag_id", dag_id),
    )

    def __init__(self, dag_id: str, warning_type: str, message: str, **kwargs):
        super().__init__(**kwargs)
        self.dag_id = dag_id
        self.warning_type = DagWarningType(warning_type).value  # make sure valid type
        self.message = message

    def __eq__(self, other) -> bool:
        return self.dag_id == other.dag_id and self.warning_type == other.warning_type

    def __hash__(self) -> int:
        return hash((self.dag_id, self.warning_type))

    @classmethod
    @provide_session
    @retry_db_transaction
    def purge_inactive_dag_warnings(cls, session: Session = NEW_SESSION) -> None:
        """
        Deactivate DagWarning records for inactive dags.

        :return: None
        """
        if session.get_bind().dialect.name == "sqlite":
            dag_ids_stmt = select(DagModel.dag_id).where(DagModel.is_stale == true())
            query = delete(cls).where(cls.dag_id.in_(dag_ids_stmt.scalar_subquery()))
        else:
            query = delete(cls).where(cls.dag_id == DagModel.dag_id, DagModel.is_stale == true())

        session.execute(query.execution_options(synchronize_session=False))
        session.commit()
```

- target: `DagWarning`
---

- language: `python`
- repo: `huggingface__transformers`
- path: `tests/models/idefics/test_modeling_idefics.py`
- class_span: `{ "start": 1673, "end": 11911 }`
- source:

```python
class ____:
    def __init__(
        self,
        parent,
        batch_size=1,
        seq_length=7,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        alpha_initializer="ones",
        num_labels=3,
        scope=None,
        modality_type_vocab_size=2,
        vision_embed_dim=32,
        vision_patch_size=2,
        vision_image_size=30,
        vision_num_attention_heads=4,
        vision_num_hidden_layers=2,
        vision_intermediate_size=37,
        perceiver_qk_layer_norms_perceiver=False,
        perceiver_resampler_depth=2,
        perceiver_resampler_head_dim=8,
        perceiver_resampler_n_heads=2,
        perceiver_resampler_n_latents=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.alpha_initializer = alpha_initializer
        self.num_labels = num_labels
        self.scope = scope
        self.modality_type_vocab_size = modality_type_vocab_size

        self.vision_embed_dim = vision_embed_dim
        self.vision_patch_size = vision_patch_size
        self.vision_image_size = vision_image_size
        self.vision_num_attention_heads = vision_num_attention_heads
        self.vision_num_hidden_layers = vision_num_hidden_layers
        self.vision_intermediate_size = vision_intermediate_size

        self.vision_config = IdeficsVisionConfig(
            embed_dim=self.vision_embed_dim,
            patch_size=self.vision_patch_size,
            image_size=self.vision_image_size,
            num_attention_heads=self.vision_num_attention_heads,
            num_hidden_layers=self.vision_num_hidden_layers,
            intermediate_size=self.vision_intermediate_size,
        ).to_dict()

        self.perceiver_qk_layer_norms_perceiver = perceiver_qk_layer_norms_perceiver
        self.perceiver_resampler_depth = perceiver_resampler_depth
        self.perceiver_resampler_head_dim = perceiver_resampler_head_dim
        self.perceiver_resampler_n_heads = perceiver_resampler_n_heads
        self.perceiver_resampler_n_latents = perceiver_resampler_n_latents

        self.perceiver_config = IdeficsPerceiverConfig(
            qk_layer_norms_perceiver=self.perceiver_qk_layer_norms_perceiver,
            resampler_depth=self.perceiver_resampler_depth,
            resampler_head_dim=self.perceiver_resampler_head_dim,
            resampler_n_heads=self.perceiver_resampler_n_heads,
            resampler_n_latents=self.perceiver_resampler_n_latents,
        )

        # we set the expected sequence length (which is used in several tests)
        # this is equal to the seq length of the text tokens + number of image patches + 1 for the CLS token
        self.expected_seq_len = self.seq_length + (self.image_size // self.patch_size) ** 2 + 1

    def prepare_config_and_inputs(self, num_images=1, interpolate_pos_encoding=False, image_expansion=0):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        pixel_values = floats_tensor(
            [
                self.batch_size,
                num_images,
                self.num_channels,
                self.image_size + image_expansion,
                self.image_size + image_expansion,
            ]
        )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        image_attention_mask = random_attention_mask([self.batch_size, self.seq_length, num_images])

        config = self.get_config()
        return (config, input_ids, input_mask, pixel_values, image_attention_mask, interpolate_pos_encoding)

    def prepare_config_and_inputs_gate_tests(self):
        # Create a list of configs and inputs, to test 2 things:
        # 1. For the same image, the output should be different when image_attention_mask is filled with 0s vs filled with 1s.
        # 2. For 2 different images, the output should be the same when image_attention_mask is filled with 0s.
        interpolate_pos_encoding = False
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        pixel_values = floats_tensor(
            [
                self.batch_size,
                1,
                self.num_channels,
                self.image_size,
                self.image_size,
            ]
        )
        pixel_values_list = [
            pixel_values.clone(),
            pixel_values.clone(),
            pixel_values.clone().fill_(0.6),
            pixel_values.clone().fill_(0.3),
        ]
        attention_mask = None
        if self.use_input_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        image_attention_mask = random_attention_mask([self.batch_size, self.seq_length, 1])
        image_attention_mask_list = [
            image_attention_mask.clone().fill_(0),
            image_attention_mask.clone().fill_(1),
            image_attention_mask.clone().fill_(0),
            image_attention_mask.clone().fill_(0),
        ]

        config = self.get_config()

        inputs_list = []
        for pixel_values, image_attention_mask in zip(pixel_values_list, image_attention_mask_list):
            inputs_list.append(
                {
                    "input_ids": input_ids,
                    "attention_mask": attention_mask,
                    "pixel_values": pixel_values,
                    "image_attention_mask": image_attention_mask,
                    "interpolate_pos_encoding": interpolate_pos_encoding,
                }
            )

        inputs_w_same_img = inputs_list[:2]
        inputs_w_0_img_attn = inputs_list[2:]
        return config, inputs_w_same_img, inputs_w_0_img_attn

    def get_config(self):
        return IdeficsConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            alpha_initializer=self.alpha_initializer,
            num_labels=self.num_labels,
            modality_type_vocab_size=self.modality_type_vocab_size,
            vision_config=self.vision_config,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        input_mask,
        pixel_values,
        image_attention_mask,
        interpolate_pos_encoding,
    ):
        model = IdeficsModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            pixel_values=pixel_values,
            image_attention_mask=image_attention_mask,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, input_ids.shape[1], self.hidden_size)
        )

    def create_and_check_model_gen(
        self,
        config,
        input_ids,
        input_mask,
        pixel_values,
        image_attention_mask,
        interpolate_pos_encoding,
    ):
        model = IdeficsForVisionText2Text(config)
        model.to(torch_device)
        model.eval()
        model.generate(
            input_ids,
            attention_mask=input_mask,
            pixel_values=pixel_values,
            image_attention_mask=image_attention_mask,
            interpolate_pos_encoding=interpolate_pos_encoding,
            max_length=self.seq_length + 2,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            pixel_values,
            image_attention_mask,
            interpolate_pos_encoding,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "pixel_values": pixel_values,
            "image_attention_mask": image_attention_mask,
            "interpolate_pos_encoding": interpolate_pos_encoding,
        }
        return config, inputs_dict

    def prepare_pixel_values(self):
        return floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])


@require_torch
```

- target: `IdeficsModelTester`
---

- language: `python`
- repo: `wandb__wandb`
- path: `wandb/integration/keras/callbacks/model_checkpoint.py`
- class_span: `{ "start": 397, "end": 8529 }`
- source:

```python
class ____(callbacks.ModelCheckpoint):
    """A checkpoint that periodically saves a Keras model or model weights.

    Saved weights are uploaded to W&B as a `wandb.Artifact`.

    Since this callback is subclassed from `tf.keras.callbacks.ModelCheckpoint`,
    the checkpointing logic is taken care of by the parent callback. You can
    learn more here:
    https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/ModelCheckpoint

    This callback is to be used in conjunction with training using `model.fit()`
    to save a model or weights (in a checkpoint file) at some interval. The
    model checkpoints will be logged as W&B Artifacts. You can learn more here:
    https://docs.wandb.ai/guides/artifacts

    This callback provides the following features:
    - Save the model that has achieved "best performance" based on "monitor".
    - Save the model at the end of every epoch regardless of the performance.
    - Save the model at the end of epoch or after a fixed number of training batches.
    - Save only model weights, or save the whole model.
    - Save the model either in SavedModel format or in `.h5` format.

    Args:
        filepath: (Union[str, os.PathLike]) path to save the model file. `filepath`
            can contain named formatting options, which will be filled by the value
            of `epoch` and keys in `logs` (passed in `on_epoch_end`). For example:
            if `filepath` is `model-{epoch:02d}-{val_loss:.2f}`, then the model
            checkpoints will be saved with the epoch number and the validation loss
            in the filename.
        monitor: (str) The metric name to monitor. Default to "val_loss".
        verbose: (int) Verbosity mode, 0 or 1. Mode 0 is silent, and mode 1
            displays messages when the callback takes an action.
        save_best_only: (bool) if `save_best_only=True`, it only saves when the model
            is considered the "best" and the latest best model according to the
            quantity monitored will not be overwritten. If `filepath` doesn't contain
            formatting options like `{epoch}` then `filepath` will be overwritten by
            each new better model locally. The model logged as an artifact will still
            be associated with the correct `monitor`. Artifacts will be uploaded
            continuously and versioned separately as a new best model is found.
        save_weights_only: (bool) if True, then only the model's weights will be saved.
        mode: (Mode) one of {'auto', 'min', 'max'}. For `val_acc`, this should be `max`,
            for `val_loss` this should be `min`, etc.
        save_freq: (Union[SaveStrategy, int]) `epoch` or integer. When using `'epoch'`,
            the callback saves the model after each epoch. When using an integer, the
            callback saves the model at end of this many batches. Note that when
            monitoring validation metrics such as `val_acc` or `val_loss`, save_freq
            must be set to "epoch" as those metrics are only available at the end of
            an epoch.
        initial_value_threshold: (Optional[float]) Floating point initial "best" value
            of the metric to be monitored.
    """

    def __init__(
        self,
        filepath: StrPath,
        monitor: str = "val_loss",
        verbose: int = 0,
        save_best_only: bool = False,
        save_weights_only: bool = False,
        mode: Mode = "auto",
        save_freq: Union[SaveStrategy, int] = "epoch",
        initial_value_threshold: Optional[float] = None,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            filepath=filepath,
            monitor=monitor,
            verbose=verbose,
            save_best_only=save_best_only,
            save_weights_only=save_weights_only,
            mode=mode,
            save_freq=save_freq,
            initial_value_threshold=initial_value_threshold,
            **kwargs,
        )
        if wandb.run is None:
            raise wandb.Error(
                "You must call `wandb.init()` before `WandbModelCheckpoint()`"
            )

        with telemetry.context(run=wandb.run) as tel:
            tel.feature.keras_model_checkpoint = True

        self.save_weights_only = save_weights_only

        # User-friendly warning when trying to save the best model.
        if self.save_best_only:
            self._check_filepath()

        self._is_old_tf_keras_version: Optional[bool] = None

    def on_train_batch_end(
        self, batch: int, logs: Optional[Dict[str, float]] = None
    ) -> None:
        if self._should_save_on_batch(batch):
            if self.is_old_tf_keras_version:
                # Save the model and get filepath
                self._save_model(epoch=self._current_epoch, logs=logs)
                filepath = self._get_file_path(epoch=self._current_epoch, logs=logs)
            else:
                # Save the model and get filepath
                self._save_model(epoch=self._current_epoch, batch=batch, logs=logs)
                filepath = self._get_file_path(
                    epoch=self._current_epoch, batch=batch, logs=logs
                )
            # Log the model as artifact
            aliases = ["latest", f"epoch_{self._current_epoch}_batch_{batch}"]
            self._log_ckpt_as_artifact(filepath, aliases=aliases)

    def on_epoch_end(self, epoch: int, logs: Optional[Dict[str, float]] = None) -> None:
        super().on_epoch_end(epoch, logs)
        # Check if model checkpoint is created at the end of epoch.
        if self.save_freq == "epoch":
            # Get filepath where the model checkpoint is saved.
            if self.is_old_tf_keras_version:
                filepath = self._get_file_path(epoch=epoch, logs=logs)
            else:
                filepath = self._get_file_path(epoch=epoch, batch=None, logs=logs)
            # Log the model as artifact
            aliases = ["latest", f"epoch_{epoch}"]
            self._log_ckpt_as_artifact(filepath, aliases=aliases)

    def _log_ckpt_as_artifact(
        self, filepath: str, aliases: Optional[List[str]] = None
    ) -> None:
        """Log model checkpoint as  W&B Artifact."""
        try:
            assert wandb.run is not None
            model_checkpoint_artifact = wandb.Artifact(
                f"run_{wandb.run.id}_model", type="model"
            )
            if os.path.isfile(filepath):
                model_checkpoint_artifact.add_file(filepath)
            elif os.path.isdir(filepath):
                model_checkpoint_artifact.add_dir(filepath)
            else:
                raise FileNotFoundError(f"No such file or directory {filepath}")
            wandb.log_artifact(model_checkpoint_artifact, aliases=aliases or [])
        except ValueError:
            # This error occurs when `save_best_only=True` and the model
            # checkpoint is not saved for that epoch/batch. Since TF/Keras
            # is giving friendly log, we can avoid clustering the stdout.
            pass

    def _check_filepath(self) -> None:
        placeholders = []
        for tup in string.Formatter().parse(self.filepath):
            if tup[1] is not None:
                placeholders.append(tup[1])
        if len(placeholders) == 0:
            wandb.termwarn(
                "When using `save_best_only`, ensure that the `filepath` argument "
                "contains formatting placeholders like `{epoch:02d}` or `{batch:02d}`. "
                "This ensures correct interpretation of the logged artifacts.",
                repeat=False,
            )

    @property
    def is_old_tf_keras_version(self) -> Optional[bool]:
        if self._is_old_tf_keras_version is None:
            from packaging.version import parse

            try:
                if parse(tf.keras.__version__) < parse("2.6.0"):
                    self._is_old_tf_keras_version = True
                else:
                    self._is_old_tf_keras_version = False
            except AttributeError:
                self._is_old_tf_keras_version = False

        return self._is_old_tf_keras_version
```

- target: `WandbModelCheckpoint`
---

- language: `python`
- repo: `pydata__xarray`
- path: `asv_bench/benchmarks/dataset.py`
- class_span: `{ "start": 441, "end": 703 }`
- source:

```python
class ____:
    def setup(self):
        requires_dask()
        self.ds = Dataset()
        array = np.ones(1000)
        for i in range(250):
            self.ds[f"var{i}"] = ("x", array)

    def time_chunk(self):
        self.ds.chunk(x=(1,) * 1000)
```

- target: `DatasetChunk`
---

- language: `python`
- repo: `Netflix__metaflow`
- path: `metaflow/_vendor/click/types.py`
- class_span: `{ "start": 10641, "end": 12481 }`
- source:

```python
class ____(FloatParamType):
    """A parameter that works similar to :data:`click.FLOAT` but restricts
    the value to fit into a range.  The default behavior is to fail if the
    value falls outside the range, but it can also be silently clamped
    between the two edges.

    See :ref:`ranges` for an example.
    """

    name = "float range"

    def __init__(self, min=None, max=None, clamp=False):
        self.min = min
        self.max = max
        self.clamp = clamp

    def convert(self, value, param, ctx):
        rv = FloatParamType.convert(self, value, param, ctx)
        if self.clamp:
            if self.min is not None and rv < self.min:
                return self.min
            if self.max is not None and rv > self.max:
                return self.max
        if (
            self.min is not None
            and rv < self.min
            or self.max is not None
            and rv > self.max
        ):
            if self.min is None:
                self.fail(
                    "{} is bigger than the maximum valid value {}.".format(
                        rv, self.max
                    ),
                    param,
                    ctx,
                )
            elif self.max is None:
                self.fail(
                    "{} is smaller than the minimum valid value {}.".format(
                        rv, self.min
                    ),
                    param,
                    ctx,
                )
            else:
                self.fail(
                    "{} is not in the valid range of {} to {}.".format(
                        rv, self.min, self.max
                    ),
                    param,
                    ctx,
                )
        return rv

    def __repr__(self):
        return "FloatRange({}, {})".format(self.min, self.max)
```

- target: `FloatRange`
---

- language: `python`
- repo: `huggingface__transformers`
- path: `src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py`
- class_span: `{ "start": 12939, "end": 15086 }`
- source:

```python
class ____(MobileNetV2PreTrainedModel):
    def __init__(self, config: MobileNetV2Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v2 = MobileNetV2Model(config)

        last_hidden_size = self.mobilenet_v2.conv_1x1.convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            loss = self.loss_function(labels, logits, self.config)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
```

- target: `MobileNetV2ForImageClassification`
---

- language: `python`
- repo: `tiangolo__fastapi`
- path: `docs_src/generate_clients/tutorial002.py`
- class_span: `{ "start": 159, "end": 212 }`
- source:

```python
class ____(BaseModel):
    message: str
```

- target: `ResponseMessage`
---

- language: `python`
- repo: `pytorch__pytorch`
- path: `test/distributed/_composable/test_composability/test_pp_composability.py`
- class_span: `{ "start": 2610, "end": 28413 }`
- source (truncated in the original dump; the record ends mid-statement and its `target` field is missing):

```python
class ____(MultiProcessTestCase):
    @classmethod
    def backend_str(cls) -> str:
        # Testing with NCCL backend
        return backend

    def setUp(self):
        super().setUp()
        self._spawn_processes()

    def tearDown(self):
        super().tearDown()
        try:
            os.remove(self.file_name)
        except OSError:
            pass

    @property
    def world_size(self):
        return 8

    @property
    def device(self):
        return self.rank

    @requires_accelerator_dist_backend(["nccl", "xccl"])
    @skip_if_lt_x_gpu(8)
    @skip_but_pass_in_sandcastle_if(
        not TEST_MULTIGPU and not TEST_XPU, "Test requires 4+ GPUs"
    )
    def test_pp_and_dcp(self):
        """
        Test that pipeline parallelism and distributed checkpointing can be used together and
        with saved correct FQNs
        """

        class AppState(Stateful):
            def __init__(self, model, optimizer):
                self.model = model
                self.optimizer = optimizer

            def state_dict(self):
                # this line automatically manages FSDP FQN's, as well as sets the default state dict type to FSDP.SHARDED_STATE_DICT
                model_state_dict, optimizer_state_dict = get_state_dict(
                    self.model, self.optimizer
                )
                return {"model": model_state_dict, "optim": optimizer_state_dict}

            def load_state_dict(self, state_dict):
                # sets our state dicts on the model and optimizer, now that we've loaded
                set_state_dict(
                    self.model,
                    self.optimizer,
                    model_state_dict=state_dict["model"],
                    optim_state_dict=state_dict["optim"],
                )

        class PPModelChunk(nn.Module):
            def __init__(self, layers: nn.ModuleDict, start_index: int, end_index: int):
                super().__init__()
                # Filter layers based on start_index and end_index
                self.layers = nn.ModuleDict(
                    {str(i): layers[str(i)] for i in range(start_index, end_index)}
                )

            def forward(self, x):
                for layer in self.layers.values():
                    x = layer(x)
                return x

        device = torch.device(device_type, self.device)
        torch.accelerator.set_device_index(self.device)
        store = torch.distributed.FileStore(self.file_name, self.world_size)
        torch.distributed.init_process_group(
            backend=backend,
            store=store,
            rank=self.rank,
            world_size=self.world_size,
            device_id=device,
        )
        # create "entire model"
        total_layers = 8
        dim = 10
        full_model = nn.ModuleDict(
            {f"{i}": MLPModule(dim) for i in range(total_layers)}
        )
        # Calculate start and end indices based on rank
        start_index = self.rank
        end_index = start_index + 1
        pp_model = PPModelChunk(full_model, start_index, end_index)

        pp_model.to(self.device)
        opt = torch.optim.Adam(pp_model.parameters(), lr=0.1)

        # perform work in a temp dir that is cleaned up after the test
        @with_temp_dir
        def _dcp_test(self):
            state_dict = {"app": AppState(pp_model, opt)}
            dcp.save(state_dict, checkpoint_id=self.temp_dir)
            # temp checkpoint
            sd: STATE_DICT_TYPE = {}
            _load_state_dict(
                sd,
                storage_reader=FileSystemReader(self.temp_dir),
                planner=_EmptyStateDictLoadPlanner(),
            )
            # Check parameter names in sd and compare with pp_model
            pp_model_param_names = set(pp_model.state_dict().keys())
            sd_param_names = set(sd["app"]["model"].keys())
            # Verify each parameter name in pp_model is contained in sd
            for param_name in pp_model_param_names:
                self.assertIn(
                    param_name,
                    sd_param_names,
                    f"Parameter name '{param_name}' not found in state_dict.",
                )

        _dcp_test(self)

    @requires_accelerator_dist_backend(["nccl", "xccl"])
    @skip_if_lt_x_gpu(8)
    @skip_but_pass_in_sandcastle_if(
        not TEST_MULTIGPU and not TEST_XPU, "Test requires 8+ GPUs"
    )
    @parametrize(
        "ScheduleClass",
        [
            ScheduleGPipe,
            Schedule1F1B,
            ScheduleInterleaved1F1B,
            ScheduleLoopedBFS,
            ScheduleInterleavedZeroBubble,
        ],
    )
    @parametrize(
        "MixedPrecisionParam",
        [
            torch.bfloat16,
            torch.float32,
        ],
    )
    def test_3d_with_tp_dp_pp(self, ScheduleClass, MixedPrecisionParam):
        torch.accelerator.set_device_index(self.device)
        store = torch.distributed.FileStore(self.file_name, self.world_size)
        torch.distributed.init_process_group(
            backend=backend,
            store=store,
            rank=self.rank,
            world_size=self.world_size,
        )
        dim = 8
        tp_size = 2
        pp_size = 2
        num_microbatches = 8
        dp_size = self.world_size // (tp_size * pp_size)
        device_mesh = init_device_mesh(
            device_type,
            mesh_shape=(dp_size, pp_size, tp_size),
            mesh_dim_names=("dp", "pp", "tp"),
        )
        dp_mesh = device_mesh["dp"]
        tp_mesh = device_mesh["tp"]
        pp_mesh = device_mesh["pp"]
        pp_group = device_mesh["pp"].get_group()

        # create "entire model"
        total_layers = 8
        full_model = nn.ModuleList([MLPModuleEven(dim) for _ in range(total_layers)])

        # dummy loss needed just to force backwards to run in schedule step
        def loss_fn(y, target):
            return y.sum()

        # Apply DP to stage module
        def apply_fsdp(partial_model):
            # apply FSDP
            mp_policy = MixedPrecisionPolicy(
                param_dtype=MixedPrecisionParam,
                reduce_dtype=torch.float32,
            )
            fsdp_config = {"mesh": dp_mesh, "mp_policy": mp_policy}
            for layer_id in range(len(partial_model)):
                fully_shard(
                    partial_model[layer_id],
                    **fsdp_config,
                    reshard_after_forward=False,
                )
            dp_model = fully_shard(partial_model, **fsdp_config)
            return dp_model

        def apply_tp(
            model: nn.Module,
            tp_mesh: DeviceMesh,
        ):
            parallelize_plan = {
                "net1": ColwiseParallel(),
                "net2": RowwiseParallel(),
                "net3": ColwiseParallel(),
            }
            for layer in model:
                parallelize_module(layer, tp_mesh, parallelize_plan)
            return model

        if issubclass(ScheduleClass, PipelineScheduleSingle):
            n_virtual = 1
        else:
            n_virtual = 2
        num_stages = pp_group.size() * n_virtual
        layers_per_stage = total_layers // num_stages
        stages = []
        for i in range(n_virtual):
            stage_idx = pp_group.rank() + pp_group.size() * i
            start_layer = stage_idx * layers_per_stage
            end_layer = start_layer + layers_per_stage
            # divide the model layers by the number of stages
            partial_model = nn.Sequential(*full_model[start_layer:end_layer])
            partial_model.to(self.device)
            tp_model = apply_tp(partial_model, tp_mesh)
            dp_model = apply_fsdp(tp_model)
            stage = PipelineStage(
                dp_model,
                stage_idx,
                num_stages,
                self.device,
                group=pp_group,
            )
            stages.append(stage)
        partial_models = [pipeline_stage.submod for pipeline_stage in stages]
        if issubclass(ScheduleClass, PipelineScheduleSingle):
            stages = stages[0]

        pipeline_schedule = ScheduleClass(
            stages,
            n_microbatches=num_microbatches,
            loss_fn=loss_fn,
            scale_grads=False,
        )
        optimizer_kwargs = {
            "lr": 0.01,
            "betas": (0.9, 0.95),
            "weight_decay": 0.1,
            "fused": False,
            "foreach": True,
        }
        optimizers = [
            torch.optim.AdamW(model.parameters(), **optimizer_kwargs)
            for model in partial_models
        ]

        for _train_step in range(5):
            for optimizer in optimizers:
                optimizer.zero_grad()
            inputs = torch.rand((num_microbatches, dim), device=self.device)
            labels = torch.rand((num_microbatches, dim), device=self.device)
            is_last_stage = pp_mesh.get_local_rank() == pp_mesh.size() - 1
            if pp_mesh.get_local_rank() == 0:
                pipeline_schedule.step(inputs)
            elif is_last_stage:
                losses = []
                pipeline_schedule.step(target=labels, losses=losses)
            else:
                pipeline_schedule.step()

            for optimizer in optimizers:
                optimizer.step()

        torch.distributed.destroy_process_group()

    @requires_accelerator_dist_backend(["nccl", "xccl"])
    @skip_if_lt_x_gpu(8)
    @skip_but_pass_in_sandcastle_if(
        not TEST_MULTIGPU and not TEST_XPU, "Test requires 8+ GPUs"
    )
    @parametrize(
        "ScheduleClass",
        [
            ScheduleGPipe,
            Schedule1F1B,
            ScheduleInterleaved1F1B,
            ScheduleLoopedBFS,
            ScheduleInterleavedZeroBubble,
        ],
    )
    @parametrize(
        "MixedPrecisionParam",
        [
            torch.bfloat16,
            torch.float32,
        ],
    )
    def test_replicate_pp(self, ScheduleClass, MixedPrecisionParam):
        torch.accelerator.set_device_index(self.device)
        store = torch.distributed.FileStore(self.file_name, self.world_size)
        torch.distributed.init_process_group(
            backend=backend,
            store=store,
            rank=self.rank,
            world_size=self.world_size,
        )
        dim = 8
        pp_size = 2
        num_microbatches = 8
        replicate_size = self.world_size // (pp_size)
        device_mesh = init_device_mesh(
            device_type,
            mesh_shape=(replicate_size, pp_size),
            mesh_dim_names=("replicate", "pp"),
        )
        torch.manual_seed(42)
        dp_mesh = device_mesh["replicate"]
        pp_mesh = device_mesh["pp"]
        pp_group = device_mesh["pp"].get_group()

        # create "entire model"
        total_layers = 8
        full_model = nn.ModuleList([MLPModule(dim) for _ in range(total_layers)])
        ref_full_model = copy.deepcopy(full_model)

        # dummy loss needed just to force backwards to run in schedule step
        def loss_fn(y, target):
            return y.sum()

        # Apply DP to stage module
        def apply_replicate(partial_model):
            # apply replicate
            mp_policy = MixedPrecisionPolicy(
                param_dtype=MixedPrecisionParam,
                reduce_dtype=torch.float32,
            )
            replicate_config = {"mesh": dp_mesh, "mp_policy": mp_policy}
            for layer_id in range(len(partial_model)):
                replicate(
                    partial_model[layer_id],
                    **replicate_config,
                )
            dp_model = replicate(partial_model, **replicate_config)
            return dp_model

        # Apply same precision to reference model (without replicate)
        def apply_same_precision(partial_model):
            if MixedPrecisionParam != torch.float32:
                # Cast to same precision as pipeline model
                partial_model = partial_model.to(dtype=MixedPrecisionParam)
            return partial_model

        if issubclass(ScheduleClass, PipelineScheduleSingle):
            n_virtual = 1
        else:
            n_virtual = 2
        num_stages = pp_group.size() * n_virtual
        layers_per_stage = total_layers // num_stages
        stages = []
        ref_stages = []
        for i in range(n_virtual):
            stage_idx = pp_group.rank() + pp_group.size() * i
            start_layer = stage_idx * layers_per_stage
            end_layer = start_layer + layers_per_stage
            # divide the model layers by the number of stages
            partial_model = nn.Sequential(*full_model[start_layer:end_layer])
            partial_model.to(self.device)
            ref_partial_model = nn.Sequential(*ref_full_model[start_layer:end_layer])
            ref_partial_model.to(self.device)
            dp_model = apply_replicate(partial_model)
            ref_dp_model = apply_same_precision(ref_partial_model)
            stage = PipelineStage(
                dp_model,
                stage_idx,
                num_stages,
                self.device,
                group=pp_group,
            )
            ref_stage = PipelineStage(
                ref_dp_model,
                stage_idx,
                num_stages,
                self.device,
                group=pp_group,
            )
            stages.append(stage)
            ref_stages.append(ref_stage)
        partial_models = [pipeline_stage.submod for pipeline_stage in stages]
        ref_partial_models = [
            pipeline_stage.submod for pipeline_stage in ref_stages
        ]
        if issubclass(ScheduleClass, PipelineScheduleSingle):
            stages = stages[0]
            ref_stages = ref_stages[0]

        pipeline_schedule = ScheduleClass(
            stages,
            n_microbatches=num_microbatches,
            loss_fn=loss_fn,
            scale_grads=False,
        )
        ref_pipeline_schedule = ScheduleClass(
            ref_stages,
            n_microbatches=num_microbatches,
            loss_fn=loss_fn,
            scale_grads=False,
        )
        optimizer_kwargs = {
            "lr": 0.01,
            "betas": (0.9, 0.95),
            "weight_decay": 0.1,
            "fused": False,
            "foreach": True,
        }
        optimizers = [
            torch.optim.AdamW(model.parameters(), **optimizer_kwargs)
            for model in partial_models
        ]
        ref_optimizers = [
            torch.optim.AdamW(model.parameters(), **optimizer_kwargs)
            for model in ref_partial_models
        ]

        for _train_step in range(5):
            for optimizer in optimizers:
                optimizer.zero_grad()
            for ref_optimizer in ref_optimizers:
                ref_optimizer.zero_grad()
            inputs = torch.rand(
                (num_microbatches, dim), device=self.device, dtype=MixedPrecisionParam
            )
            labels = torch.rand(
                (num_microbatches, dim), device=self.device, dtype=MixedPrecisionParam
            )
            is_last_stage = pp_mesh.get_local_rank() == pp_mesh.size() - 1
            if pp_mesh.get_local_rank() == 0:
                pipeline_schedule.step(inputs)
                ref_pipeline_schedule.step(inputs)
            elif is_last_stage:
                losses = []
                ref_losses = []
                pipeline_schedule.step(target=labels, losses=losses)
                ref_pipeline_schedule.step(target=labels, losses=ref_losses)
                for loss, ref_loss in zip(losses, ref_losses):
                    self.assertEqual(loss, ref_loss)
            else:
                pipeline_schedule.step()
                ref_pipeline_schedule.step()

            for optimizer in optimizers:
                optimizer.step()
            for ref_optimizer in ref_optimizers:
                ref_optimizer.step()

        torch.distributed.destroy_process_group()

    @requires_accelerator_dist_backend(["nccl", "xccl"])
    @skip_if_lt_x_gpu(8)
    @skip_but_pass_in_sandcastle_if(
        not TEST_MULTIGPU and not TEST_XPU, "Test requires 8+ GPUs"
    )
    @parametrize(
        "ScheduleClass",
        [
            ScheduleGPipe,
            Schedule1F1B,
            ScheduleInterleaved1F1B,
            ScheduleLoopedBFS,
            ScheduleInterleavedZeroBubble,
        ],
    )
    def test_replicate_pp_grads(self, ScheduleClass):
        torch.accelerator.set_device_index(self.device)
        store = torch.distributed.FileStore(self.file_name, self.world_size)
        torch.distributed.init_process_group(
            backend=backend,
            store=store,
            rank=self.rank,
            world_size=self.world_size,
        )
        dim = 8
        pp_size = 2
        num_microbatches = 8
        replicate_size = self.world_size // (pp_size)
        device_mesh = init_device_mesh(
            device_type,
            mesh_shape=(replicate_size, pp_size),
            mesh_dim_names=("replicate", "pp"),
        )
        torch.manual_seed(42)
        dp_mesh = device_mesh["replicate"]
        pp_mesh = device_mesh["pp"]
        pp_group = device_mesh["pp"].get_group()
        dp_group = device_mesh["replicate"].get_group()

        # create "entire model"
        total_layers = 8
        full_model = nn.ModuleList([MLPModule(dim) for _ in range(total_layers)])
        ref_model = nn.Sequential(*copy.deepcopy(full_model)).to(self.device)

        # dummy loss needed just to force backwards to run in schedule step
        def loss_fn(y, target):
            return y.sum()

        # Simulate microbatch processing for reference model
        def simulate_stage_forward_backward(model, inputs, labels):
            """Simulate forward and backward passes through stages for microbatch processing"""
            batch_size, _ = inputs.shape
            total_loss = 0

            # Split inputs into microbatches
            microbatch_size = batch_size // num_microbatches
            for mb_idx in range(num_microbatches):
                start_idx = mb_idx * microbatch_size
                end_idx = start_idx + microbatch_size
                mb_input = inputs[start_idx:end_idx]
                mb_label = labels[start_idx:end_idx] if labels is not None else None

                # Simulate stage-by-stage processing
                if issubclass(ScheduleClass, PipelineScheduleSingle):
                    num_stages = pp_group.size()
                    layers_per_stage = total_layers // pp_group.size()  # 8 // 2 = 4
                else:
                    n_virtual = 2
                    num_stages = pp_group.size() * n_virtual
                    layers_per_stage = total_layers // num_stages

                # Forward pass through all stages
                x = mb_input
                for stage in range(num_stages):
                    start_layer = stage * layers_per_stage
                    end_layer = start_layer + layers_per_stage

                    # Process layers for this stage
                    for layer_idx in range(start_layer, min(end_layer, len(model))):
                        x = model[layer_idx](x)

                mb_loss = loss_fn(x, mb_label)
                total_loss += mb_loss

                # Backward pass
                mb_loss.backward()

            return total_loss / num_microbatches

        # Apply replicate to stage module
        def apply_replicate(partial_model):
            for layer_id in range(len(partial_model)):
                replicate(
                    partial_model[layer_id],
                    mesh=dp_mesh,
                )
            dp_model = replicate(partial_model, mesh=dp_mesh)
            return dp_model

        def pipelined_models_parameters(start_layer, model):
            layer_idx = start_layer
            for layer in model.children():
                for name, param in layer.named_parameters():
                    updated_param_name = f"{layer_idx}.{name}"
                    pipeline_model_parameter_dict[updated_param_name] = param
                layer_idx += 1

        def check_gradient_parity(
            pipeline_model_parameter_dict, ref_model_parameter_dict
        ):
            for parameter in pipeline_model_parameter_dict:
                assert parameter in ref_model_parameter_dict
                pipeline_parameter = pipeline_model_parameter_dict[parameter]
                if pipeline_parameter.grad is not None:
                    pipeline_parameter_grad = pipeline_parameter.grad.to_local()
                    ref_parameter = ref_model_parameter_dict[parameter]
                    if ref_parameter.grad is not None:
                        torch.testing.assert_close(
                            pipeline_parameter_grad,
                            ref_parameter.grad,
                            rtol=1e-4,
                            atol=1e-5,
                        )
                else:
                    assert pipeline_parameter.grad is None

        pipeline_model_parameter_dict = {}
        if issubclass(ScheduleClass, PipelineScheduleSingle):
            n_virtual = 1
        else:
            n_virtual = 2
        num_stages = pp_group.size() * n_virtual
        layers_per_stage = total_layers // num_stages
        stages = []
        for i in range(n_virtual):
            stage_idx = pp_group.rank() + pp_group.size() * i
            start_layer = stage_idx * layers_per_stage
            end_layer = start_layer + layers_per_stage
            # divide the model layers by the number of stages
            partial_model = nn.Sequential(*full_model[start_layer:end_layer])
            partial_model.to(self.device)
            dp_model = apply_replicate(partial_model)
            pipelined_models_parameters(start_layer, dp_model)
            stage = PipelineStage(
                dp_model,
                stage_idx,
                num_stages,
                self.device,
                group=pp_group,
            )
            stages.append(stage)
        partial_models = [pipeline_stage.submod for pipeline_stage in stages]
        if issubclass(ScheduleClass, PipelineScheduleSingle):
            stages = stages[0]

        pipeline_schedule = ScheduleClass(
            stages,
            n_microbatches=num_microbatches,
            loss_fn=loss_fn,
            scale_grads=False,
        )
        optimizer_kwargs = {
            "lr": 0.01,
            "betas": (0.9, 0.95),
            "weight_decay": 0.1,
            "fused": False,
            "foreach": True,
        }
        optimizers = [
            torch.optim.AdamW(model.parameters(), **optimizer_kwargs)
            for model in partial_models
        ]
        ref_optimizer = torch.optim.AdamW(ref_model.parameters(), **optimizer_kwargs)

        # Helper function to simulate all-reduce for reference model gradients
        def simulate_all_reduce_grads(model, group):
            """Simulate all-reduce operation on gradients like replicate does"""
            for param in model.parameters():
                if param.grad is not None:
                    # Scale by the number of replicas (like replicate does)
                    param.grad.div_(group.size())
                    # Simulate all-reduce
                    torch.distributed.all_reduce(param.grad, group=group)

        ref_model_parameter_dict = {}
        ref_model_parameter_dict = dict(ref_model.named_parameters())

        torch.manual_seed(42 + self.rank)
        for _ in range(5):
            for optimizer in optimizers:
                optimizer.zero_grad()
            ref_optimizer.zero_grad()
            inputs = torch.rand((num_microbatches, dim), device=self.device)
            labels = torch.rand((num_microbatches, dim), device=self.device)
            # Ensure all ranks use the same inputs/labels for comparison
            torch.distributed.broadcast(inputs, 0)
            torch.distributed.broadcast(labels, 0)
            is_last_stage = pp_mesh.get_local_rank() == pp_mesh.size() - 1

            # Run pipeline schedule
            if pp_mesh.get_local_rank() == 0:
                pipeline_schedule.step(inputs)
            elif is_last_stage:
                losses = []
                pipeline_schedule.step(target=labels, losses=losses)
            else:
                pipeline_schedule.step()

            # Run reference model simulation
            if is_last_stage:
                ref_loss = simulate_stage_forward_backward(ref_model, inputs, labels)
                # Simulate all-reduce on reference model gradients
                simulate_all_reduce_grads(ref_model, dp_group)

            # Compare losses - only check on last stage where we have losses
            if "losses" in locals() and len(losses) > 0:
                # Average the
```
microbatch losses to match ref_loss avg_pipeline_loss = sum(losses) / len(losses) torch.testing.assert_close( avg_pipeline_loss, ref_loss, rtol=1e-4, atol=1e-5 ) else: # For non-last stages, still run ref model to generate gradients simulate_stage_forward_backward(ref_model, inputs, None) simulate_all_reduce_grads(ref_model, dp_group) # Step optimizers for optimizer in optimizers: optimizer.step() ref_optimizer.step() check_gradient_parity( pipeline_model_parameter_dict, ref_model_parameter_dict ) torch.distributed.destroy_process_group() instantiate_parametrized_tests(ComposabilityTest) if __name__ == "__main__": run_tests()
ComposabilityTest
python
marshmallow-code__apispec
tests/test_ext_marshmallow.py
{ "start": 13708, "end": 48348 }
class ____:
    @pytest.fixture
    def make_pet_callback_spec(self, spec_fixture):
        def _make_pet_spec(operations):
            spec_fixture.spec.path(
                path="/pet",
                operations={
                    "post": {"callbacks": {"petEvent": {"petCallbackUrl": operations}}}
                },
            )
            return spec_fixture

        return _make_pet_spec

    @pytest.mark.parametrize(
        "pet_schema",
        (PetSchema, PetSchema(), PetSchema(many=True), "tests.schemas.PetSchema"),
    )
    @pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
    def test_schema_v2(self, spec_fixture, pet_schema):
        spec_fixture.spec.path(
            path="/pet",
            operations={
                "get": {
                    "responses": {
                        200: {
                            "schema": pet_schema,
                            "description": "successful operation",
                            "headers": {"PetHeader": {"schema": pet_schema}},
                        }
                    }
                }
            },
        )
        get = get_paths(spec_fixture.spec)["/pet"]["get"]
        if isinstance(pet_schema, Schema) and pet_schema.many is True:
            assert get["responses"]["200"]["schema"]["type"] == "array"
            schema_reference = get["responses"]["200"]["schema"]["items"]
            assert (
                get["responses"]["200"]["headers"]["PetHeader"]["schema"]["type"]
                == "array"
            )
            header_reference = get["responses"]["200"]["headers"]["PetHeader"][
                "schema"
            ]["items"]
        else:
            schema_reference = get["responses"]["200"]["schema"]
            header_reference = get["responses"]["200"]["headers"]["PetHeader"]["schema"]

        assert schema_reference == build_ref(spec_fixture.spec, "schema", "Pet")
        assert header_reference == build_ref(spec_fixture.spec, "schema", "Pet")
        assert len(spec_fixture.spec.components.schemas) == 1
        resolved_schema = spec_fixture.spec.components.schemas["Pet"]
        assert resolved_schema == spec_fixture.openapi.schema2jsonschema(PetSchema)
        assert get["responses"]["200"]["description"] == "successful operation"

    @pytest.mark.parametrize(
        "pet_schema",
        (PetSchema, PetSchema(), PetSchema(many=True), "tests.schemas.PetSchema"),
    )
    @pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
    def test_schema_v3(self, spec_fixture, pet_schema):
        spec_fixture.spec.path(
            path="/pet",
            operations={
                "get": {
                    "responses": {
                        200: {
                            "content": {"application/json": {"schema": pet_schema}},
                            "description": "successful operation",
                            "headers": {"PetHeader": {"schema": pet_schema}},
                        }
                    }
                }
            },
        )
        get = get_paths(spec_fixture.spec)["/pet"]["get"]
        if isinstance(pet_schema, Schema) and pet_schema.many is True:
            assert (
                get["responses"]["200"]["content"]["application/json"]["schema"]["type"]
                == "array"
            )
            schema_reference = get["responses"]["200"]["content"]["application/json"][
                "schema"
            ]["items"]
            assert (
                get["responses"]["200"]["headers"]["PetHeader"]["schema"]["type"]
                == "array"
            )
            header_reference = get["responses"]["200"]["headers"]["PetHeader"][
                "schema"
            ]["items"]
        else:
            schema_reference = get["responses"]["200"]["content"]["application/json"][
                "schema"
            ]
            header_reference = get["responses"]["200"]["headers"]["PetHeader"]["schema"]

        assert schema_reference == build_ref(spec_fixture.spec, "schema", "Pet")
        assert header_reference == build_ref(spec_fixture.spec, "schema", "Pet")
        assert len(spec_fixture.spec.components.schemas) == 1
        resolved_schema = spec_fixture.spec.components.schemas["Pet"]
        assert resolved_schema == spec_fixture.openapi.schema2jsonschema(PetSchema)
        assert get["responses"]["200"]["description"] == "successful operation"

    @pytest.mark.parametrize(
        "pet_schema",
        (PetSchema, PetSchema(), PetSchema(many=True), "tests.schemas.PetSchema"),
    )
    @pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
    def test_callback_schema_v3(self, make_pet_callback_spec, pet_schema):
        spec_fixture = make_pet_callback_spec(
            {
                "get": {
                    "responses": {
                        "200": {
                            "content": {"application/json": {"schema": pet_schema}},
                            "description": "successful operation",
                            "headers": {"PetHeader": {"schema": pet_schema}},
                        }
                    }
                }
            }
        )
        p = get_paths(spec_fixture.spec)["/pet"]
        c = p["post"]["callbacks"]["petEvent"]["petCallbackUrl"]
        get = c["get"]
        if isinstance(pet_schema, Schema) and pet_schema.many is True:
            assert (
                get["responses"]["200"]["content"]["application/json"]["schema"]["type"]
                == "array"
            )
            schema_reference = get["responses"]["200"]["content"]["application/json"][
                "schema"
            ]["items"]
            assert (
                get["responses"]["200"]["headers"]["PetHeader"]["schema"]["type"]
                == "array"
            )
            header_reference = get["responses"]["200"]["headers"]["PetHeader"][
                "schema"
            ]["items"]
        else:
            schema_reference = get["responses"]["200"]["content"]["application/json"][
                "schema"
            ]
            header_reference = get["responses"]["200"]["headers"]["PetHeader"]["schema"]

        assert schema_reference == build_ref(spec_fixture.spec, "schema", "Pet")
        assert header_reference == build_ref(spec_fixture.spec, "schema", "Pet")
        assert len(spec_fixture.spec.components.schemas) == 1
        resolved_schema = spec_fixture.spec.components.schemas["Pet"]
        assert resolved_schema == spec_fixture.openapi.schema2jsonschema(PetSchema)
        assert get["responses"]["200"]["description"] == "successful operation"

    @pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
    def test_schema_expand_parameters_v2(self, spec_fixture):
        spec_fixture.spec.path(
            path="/pet",
            operations={
                "get": {"parameters": [{"in": "query", "schema": PetSchema}]},
                "post": {
                    "parameters": [
                        {
                            "in": "body",
                            "description": "a pet schema",
                            "required": True,
                            "name": "pet",
                            "schema": PetSchema,
                        }
                    ]
                },
            },
        )
        p = get_paths(spec_fixture.spec)["/pet"]
        get = p["get"]
        assert get["parameters"] == spec_fixture.openapi.schema2parameters(
            PetSchema(), location="query"
        )
        post = p["post"]
        assert post["parameters"] == spec_fixture.openapi.schema2parameters(
            PetSchema,
            location="body",
            required=True,
            name="pet",
            description="a pet schema",
        )

    @pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
    def test_schema_expand_parameters_v3(self, spec_fixture):
        spec_fixture.spec.path(
            path="/pet",
            operations={
                "get": {"parameters": [{"in": "query", "schema": PetSchema}]},
                "post": {
                    "requestBody": {
                        "description": "a pet schema",
                        "required": True,
                        "content": {"application/json": {"schema": PetSchema}},
                    }
                },
            },
        )
        p = get_paths(spec_fixture.spec)["/pet"]
        get = p["get"]
        assert get["parameters"] == spec_fixture.openapi.schema2parameters(
            PetSchema(), location="query"
        )
        for parameter in get["parameters"]:
            description = parameter.get("description", False)
            assert description
            name = parameter["name"]
            assert description == PetSchema.description[name]
        post = p["post"]
        post_schema = spec_fixture.marshmallow_plugin.resolver.resolve_schema_dict(
            PetSchema
        )
        assert (
            post["requestBody"]["content"]["application/json"]["schema"] == post_schema
        )
        assert post["requestBody"]["description"] == "a pet schema"
        assert post["requestBody"]["required"]

    @pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
    def test_callback_schema_expand_parameters_v3(self, make_pet_callback_spec):
        spec_fixture = make_pet_callback_spec(
            {
                "get": {"parameters": [{"in": "query", "schema": PetSchema}]},
                "post": {
                    "requestBody": {
                        "description": "a pet schema",
                        "required": True,
                        "content": {"application/json": {"schema": PetSchema}},
                    }
                },
            }
        )
        p = get_paths(spec_fixture.spec)["/pet"]
        c = p["post"]["callbacks"]["petEvent"]["petCallbackUrl"]
        get = c["get"]
        assert get["parameters"] == spec_fixture.openapi.schema2parameters(
            PetSchema(), location="query"
        )
        for parameter in get["parameters"]:
            description = parameter.get("description", False)
            assert description
            name = parameter["name"]
            assert description == PetSchema.description[name]
        post = c["post"]
        post_schema = spec_fixture.marshmallow_plugin.resolver.resolve_schema_dict(
            PetSchema
        )
        assert (
            post["requestBody"]["content"]["application/json"]["schema"] == post_schema
        )
        assert post["requestBody"]["description"] == "a pet schema"
        assert post["requestBody"]["required"]

    @pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
    def test_schema_uses_ref_if_available_v2(self, spec_fixture):
        spec_fixture.spec.components.schema("Pet", schema=PetSchema)
        spec_fixture.spec.path(
            path="/pet", operations={"get": {"responses": {200: {"schema": PetSchema}}}}
        )
        get = get_paths(spec_fixture.spec)["/pet"]["get"]
        assert get["responses"]["200"]["schema"] == build_ref(
            spec_fixture.spec, "schema", "Pet"
        )

    @pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
    def test_schema_uses_ref_if_available_v3(self, spec_fixture):
        spec_fixture.spec.components.schema("Pet", schema=PetSchema)
        spec_fixture.spec.path(
            path="/pet",
            operations={
                "get": {
                    "responses": {
                        200: {"content": {"application/json": {"schema": PetSchema}}}
                    }
                }
            },
        )
        get = get_paths(spec_fixture.spec)["/pet"]["get"]
        assert get["responses"]["200"]["content"]["application/json"][
            "schema"
        ] == build_ref(spec_fixture.spec, "schema", "Pet")

    @pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
    def test_callback_schema_uses_ref_if_available_v3(self, make_pet_callback_spec):
        spec_fixture = make_pet_callback_spec(
            {
                "get": {
                    "responses": {
                        "200": {"content": {"application/json": {"schema": PetSchema}}}
                    }
                }
            }
        )
        p = get_paths(spec_fixture.spec)["/pet"]
        c = p["post"]["callbacks"]["petEvent"]["petCallbackUrl"]
        get = c["get"]
        assert get["responses"]["200"]["content"]["application/json"][
            "schema"
        ] == build_ref(spec_fixture.spec, "schema", "Pet")

    def test_schema_uses_ref_if_available_name_resolver_returns_none_v2(self):
        def resolver(schema):
            return None

        spec = APISpec(
            title="Test auto-reference",
            version="0.1",
            openapi_version="2.0",
            plugins=(MarshmallowPlugin(schema_name_resolver=resolver),),
        )
        spec.components.schema("Pet", schema=PetSchema)
        spec.path(
            path="/pet", operations={"get": {"responses": {200: {"schema": PetSchema}}}}
        )
        get = get_paths(spec)["/pet"]["get"]
        assert get["responses"]["200"]["schema"] == build_ref(spec, "schema", "Pet")

    def test_schema_uses_ref_if_available_name_resolver_returns_none_v3(self):
        def resolver(schema):
            return None

        spec = APISpec(
            title="Test auto-reference",
            version="0.1",
            openapi_version="3.0.0",
            plugins=(MarshmallowPlugin(schema_name_resolver=resolver),),
        )
        spec.components.schema("Pet", schema=PetSchema)
        spec.path(
            path="/pet",
            operations={
                "get": {
                    "responses": {
                        200: {"content": {"application/json": {"schema": PetSchema}}}
                    }
                }
            },
        )
        get = get_paths(spec)["/pet"]["get"]
        assert get["responses"]["200"]["content"]["application/json"][
            "schema"
        ] == build_ref(spec, "schema", "Pet")

    @pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
    def test_schema_resolver_allof_v2(self, spec_fixture):
        spec_fixture.spec.components.schema("Pet", schema=PetSchema)
        spec_fixture.spec.components.schema("Sample", schema=SampleSchema)
        spec_fixture.spec.path(
            path="/pet",
            operations={
                "get": {
                    "responses": {200: {"schema": {"allOf": [PetSchema, SampleSchema]}}}
                }
            },
        )
        get = get_paths(spec_fixture.spec)["/pet"]["get"]
        assert get["responses"]["200"]["schema"] == {
            "allOf": [
                build_ref(spec_fixture.spec, "schema", "Pet"),
                build_ref(spec_fixture.spec, "schema", "Sample"),
            ]
        }

    @pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
    @pytest.mark.parametrize("combinator", ["oneOf", "anyOf", "allOf"])
    def test_schema_resolver_oneof_anyof_allof_v3(self, spec_fixture, combinator):
        spec_fixture.spec.components.schema("Pet", schema=PetSchema)
        spec_fixture.spec.path(
            path="/pet",
            operations={
                "get": {
                    "responses": {
                        200: {
                            "content": {
                                "application/json": {
                                    "schema": {combinator: [PetSchema, SampleSchema]}
                                }
                            }
                        }
                    }
                }
            },
        )
        get = get_paths(spec_fixture.spec)["/pet"]["get"]
        assert get["responses"]["200"]["content"]["application/json"]["schema"] == {
            combinator: [
                build_ref(spec_fixture.spec, "schema", "Pet"),
                build_ref(spec_fixture.spec, "schema", "Sample"),
            ]
        }

    @pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
    def test_schema_resolver_not_v2(self, spec_fixture):
        spec_fixture.spec.components.schema("Pet", schema=PetSchema)
        spec_fixture.spec.path(
            path="/pet",
            operations={"get": {"responses": {200: {"schema": {"not": PetSchema}}}}},
        )
        get = get_paths(spec_fixture.spec)["/pet"]["get"]
        assert get["responses"]["200"]["schema"] == {
            "not": build_ref(spec_fixture.spec, "schema", "Pet"),
        }

    @pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
    def test_schema_resolver_not_v3(self, spec_fixture):
        spec_fixture.spec.components.schema("Pet", schema=PetSchema)
        spec_fixture.spec.path(
            path="/pet",
            operations={
                "get": {
                    "responses": {
                        200: {
                            "content": {
                                "application/json": {"schema": {"not": PetSchema}}
                            }
                        }
                    }
                }
            },
        )
        get = get_paths(spec_fixture.spec)["/pet"]["get"]
        assert get["responses"]["200"]["content"]["application/json"]["schema"] == {
            "not": build_ref(spec_fixture.spec, "schema", "Pet"),
        }

    @pytest.mark.parametrize(
        "pet_schema",
        (PetSchema, PetSchema(), "tests.schemas.PetSchema"),
    )
    def test_schema_name_resolver_returns_none_v2(self, pet_schema):
        def resolver(schema):
            return None

        spec = APISpec(
            title="Test resolver returns None",
            version="0.1",
            openapi_version="2.0",
            plugins=(MarshmallowPlugin(schema_name_resolver=resolver),),
        )
        spec.path(
            path="/pet",
            operations={"get": {"responses": {200: {"schema": pet_schema}}}},
        )
        get = get_paths(spec)["/pet"]["get"]
        assert "properties" in get["responses"]["200"]["schema"]

    @pytest.mark.parametrize(
        "pet_schema",
        (PetSchema, PetSchema(), "tests.schemas.PetSchema"),
    )
    def test_schema_name_resolver_returns_none_v3(self, pet_schema):
        def resolver(schema):
            return None

        spec = APISpec(
            title="Test resolver returns None",
            version="0.1",
            openapi_version="3.0.0",
            plugins=(MarshmallowPlugin(schema_name_resolver=resolver),),
        )
        spec.path(
            path="/pet",
            operations={
                "get": {
                    "responses": {
                        200: {"content": {"application/json": {"schema": pet_schema}}}
                    }
                }
            },
        )
        get = get_paths(spec)["/pet"]["get"]
        assert (
            "properties"
            in get["responses"]["200"]["content"]["application/json"]["schema"]
        )

    def test_callback_schema_uses_ref_if_available_name_resolver_returns_none_v3(self):
        def resolver(schema):
            return None

        spec = APISpec(
            title="Test auto-reference",
            version="0.1",
            openapi_version="3.0.0",
            plugins=(MarshmallowPlugin(schema_name_resolver=resolver),),
        )
        spec.components.schema("Pet", schema=PetSchema)
        spec.path(
            path="/pet",
            operations={
                "post": {
                    "callbacks": {
                        "petEvent": {
                            "petCallbackUrl": {
                                "get": {
                                    "responses": {
                                        "200": {
                                            "content": {
                                                "application/json": {
                                                    "schema": PetSchema
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            },
        )
        p = get_paths(spec)["/pet"]
        c = p["post"]["callbacks"]["petEvent"]["petCallbackUrl"]
        get = c["get"]
        assert get["responses"]["200"]["content"]["application/json"][
            "schema"
        ] == build_ref(spec, "schema", "Pet")

    @pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
    def test_schema_uses_ref_in_parameters_and_request_body_if_available_v2(
        self, spec_fixture
    ):
        spec_fixture.spec.components.schema("Pet", schema=PetSchema)
        spec_fixture.spec.path(
            path="/pet",
            operations={
                "get": {"parameters": [{"in": "query", "schema": PetSchema}]},
                "post": {"parameters": [{"in": "body", "schema": PetSchema}]},
            },
        )
        p = get_paths(spec_fixture.spec)["/pet"]
        assert "schema" not in p["get"]["parameters"][0]
        post = p["post"]
        assert len(post["parameters"]) == 1
        assert post["parameters"][0]["schema"] == build_ref(
            spec_fixture.spec, "schema", "Pet"
        )

    @pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
    def test_schema_uses_ref_in_parameters_and_request_body_if_available_v3(
        self, spec_fixture
    ):
        spec_fixture.spec.components.schema("Pet", schema=PetSchema)
        spec_fixture.spec.path(
            path="/pet",
            operations={
                "get": {"parameters": [{"in": "query", "schema": PetSchema}]},
                "post": {
                    "requestBody": {
                        "content": {"application/json": {"schema": PetSchema}}
                    }
                },
            },
        )
        p = get_paths(spec_fixture.spec)["/pet"]
        assert "schema" in p["get"]["parameters"][0]
        post = p["post"]
        schema_ref = post["requestBody"]["content"]["application/json"]["schema"]
        assert schema_ref == build_ref(spec_fixture.spec, "schema", "Pet")

    @pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
    def test_callback_schema_uses_ref_in_parameters_and_request_body_if_available_v3(
        self, make_pet_callback_spec
    ):
        spec_fixture = make_pet_callback_spec(
            {
                "get": {"parameters": [{"in": "query", "schema": PetSchema}]},
                "post": {
                    "requestBody": {
                        "content": {"application/json": {"schema": PetSchema}}
                    }
                },
            }
        )
        p = get_paths(spec_fixture.spec)["/pet"]
        c = p["post"]["callbacks"]["petEvent"]["petCallbackUrl"]
        assert "schema" in c["get"]["parameters"][0]
        post = c["post"]
        schema_ref = post["requestBody"]["content"]["application/json"]["schema"]
        assert schema_ref == build_ref(spec_fixture.spec, "schema", "Pet")

    @pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
    def test_schema_array_uses_ref_if_available_v2(self, spec_fixture):
        spec_fixture.spec.components.schema("Pet", schema=PetSchema)
        spec_fixture.spec.path(
            path="/pet",
            operations={
                "get": {
                    "parameters": [
                        {
                            "name": "petSchema",
                            "in": "body",
                            "schema": {"type": "array", "items": PetSchema},
                        }
                    ],
                    "responses": {
                        200: {"schema": {"type": "array", "items": PetSchema}}
                    },
                }
            },
        )
        get = get_paths(spec_fixture.spec)["/pet"]["get"]
        assert len(get["parameters"]) == 1
        resolved_schema = {
            "type": "array",
            "items": build_ref(spec_fixture.spec, "schema", "Pet"),
        }
        assert get["parameters"][0]["schema"] == resolved_schema
        assert get["responses"]["200"]["schema"] == resolved_schema

    @pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
    def test_schema_array_uses_ref_if_available_v3(self, spec_fixture):
        spec_fixture.spec.components.schema("Pet", schema=PetSchema)
        spec_fixture.spec.path(
            path="/pet",
            operations={
                "get": {
                    "parameters": [
                        {
                            "name": "Pet",
                            "in": "query",
                            "content": {
                                "application/json": {
                                    "schema": {"type": "array", "items": PetSchema}
                                }
                            },
                        }
                    ],
                    "responses": {
                        200: {
                            "content": {
                                "application/json": {
                                    "schema": {"type": "array", "items": PetSchema}
                                }
                            }
                        }
                    },
                }
            },
        )
        get = get_paths(spec_fixture.spec)["/pet"]["get"]
        assert len(get["parameters"]) == 1
        resolved_schema = {
            "type": "array",
            "items": build_ref(spec_fixture.spec, "schema", "Pet"),
        }
        request_schema = get["parameters"][0]["content"]["application/json"]["schema"]
        assert request_schema == resolved_schema
        response_schema = get["responses"]["200"]["content"]["application/json"][
            "schema"
        ]
        assert response_schema == resolved_schema

    @pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
    def test_callback_schema_array_uses_ref_if_available_v3(
        self, make_pet_callback_spec
    ):
        spec_fixture = make_pet_callback_spec(
            {
                "get": {
                    "parameters": [
                        {
                            "name": "Pet",
                            "in": "query",
                            "content": {
                                "application/json": {
                                    "schema": {"type": "array", "items": PetSchema}
                                }
                            },
                        }
                    ],
                    "responses": {
                        "200": {
                            "content": {
                                "application/json": {
                                    "schema": {"type": "array", "items": PetSchema}
                                }
                            }
                        }
                    },
                }
            }
        )
        p = get_paths(spec_fixture.spec)["/pet"]
        c = p["post"]["callbacks"]["petEvent"]["petCallbackUrl"]
        get = c["get"]
        assert len(get["parameters"]) == 1
        resolved_schema = {
            "type": "array",
            "items": build_ref(spec_fixture.spec, "schema", "Pet"),
        }
        request_schema = get["parameters"][0]["content"]["application/json"]["schema"]
        assert request_schema == resolved_schema
        response_schema = get["responses"]["200"]["content"]["application/json"][
            "schema"
        ]
        assert response_schema == resolved_schema

    @pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
    def test_schema_partially_v2(self, spec_fixture):
        spec_fixture.spec.components.schema("Pet", schema=PetSchema)
        spec_fixture.spec.path(
            path="/parents",
            operations={
                "get": {
                    "responses": {
                        200: {
                            "schema": {
                                "type": "object",
                                "properties": {
                                    "mother": PetSchema,
                                    "father": PetSchema,
                                },
                            }
                        }
                    }
                }
            },
        )
        get = get_paths(spec_fixture.spec)["/parents"]["get"]
        assert get["responses"]["200"]["schema"] == {
            "type": "object",
            "properties": {
                "mother": build_ref(spec_fixture.spec, "schema", "Pet"),
                "father": build_ref(spec_fixture.spec, "schema", "Pet"),
            },
        }

    @pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
    def test_schema_partially_v3(self, spec_fixture):
        spec_fixture.spec.components.schema("Pet", schema=PetSchema)
        spec_fixture.spec.path(
            path="/parents",
            operations={
                "get": {
                    "responses": {
                        200: {
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "type": "object",
                                        "properties": {
                                            "mother": PetSchema,
                                            "father": PetSchema,
                                        },
                                    }
                                }
                            }
                        }
                    }
                }
            },
        )
        get = get_paths(spec_fixture.spec)["/parents"]["get"]
        assert get["responses"]["200"]["content"]["application/json"]["schema"] == {
            "type": "object",
            "properties": {
                "mother": build_ref(spec_fixture.spec, "schema", "Pet"),
                "father": build_ref(spec_fixture.spec, "schema", "Pet"),
            },
        }

    @pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
    def test_callback_schema_partially_v3(self, make_pet_callback_spec):
        spec_fixture = make_pet_callback_spec(
            {
                "get": {
                    "responses": {
                        "200": {
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "type": "object",
                                        "properties": {
                                            "mother": PetSchema,
                                            "father": PetSchema,
                                        },
                                    }
                                }
                            }
                        }
                    }
                }
            }
        )
        p = get_paths(spec_fixture.spec)["/pet"]
        c = p["post"]["callbacks"]["petEvent"]["petCallbackUrl"]
        get = c["get"]
        assert get["responses"]["200"]["content"]["application/json"]["schema"] == {
            "type": "object",
            "properties": {
                "mother": build_ref(spec_fixture.spec, "schema", "Pet"),
                "father": build_ref(spec_fixture.spec, "schema", "Pet"),
            },
        }

    def test_parameter_reference(self, spec_fixture):
        if spec_fixture.spec.openapi_version.major < 3:
            param = {"schema": PetSchema}
        else:
            param = {"content": {"application/json": {"schema": PetSchema}}}
        spec_fixture.spec.components.parameter("Pet", "body", param)
        spec_fixture.spec.path(
            path="/parents", operations={"get": {"parameters": ["Pet"]}}
        )
        get = get_paths(spec_fixture.spec)["/parents"]["get"]
        assert get["parameters"] == [build_ref(spec_fixture.spec, "parameter", "Pet")]

    def test_response_reference(self, spec_fixture):
        if spec_fixture.spec.openapi_version.major < 3:
            resp = {"schema": PetSchema}
        else:
            resp = {"content": {"application/json": {"schema": PetSchema}}}
        spec_fixture.spec.components.response("Pet", resp)
        spec_fixture.spec.path(
            path="/parents", operations={"get": {"responses": {"200": "Pet"}}}
        )
        get = get_paths(spec_fixture.spec)["/parents"]["get"]
        assert get["responses"] == {
            "200": build_ref(spec_fixture.spec, "response", "Pet")
        }

    def test_schema_global_state_untouched_2json(self, spec_fixture):
        assert get_nested_schema(RunSchema, "sample") is None
        data = spec_fixture.openapi.schema2jsonschema(RunSchema)
        json.dumps(data)
        assert get_nested_schema(RunSchema, "sample") is None

    def test_schema_global_state_untouched_2parameters(self, spec_fixture):
        assert get_nested_schema(RunSchema, "sample") is None
        data = spec_fixture.openapi.schema2parameters(RunSchema, location="json")
        json.dumps(data)
        assert get_nested_schema(RunSchema, "sample") is None

    def test_resolve_schema_dict_ref_as_string(self, spec):
        """Test schema ref passed as string"""
        # The case tested here is a reference passed as string, not a
        # marshmallow Schema passed by name as string. We want to ensure the
        # MarshmallowPlugin does not interfere with the feature interpreting
        # strings as references. Therefore, we use a specific name to ensure
        # there is no Schema with that name in the marshmallow registry from
        # somewhere else in the tests.
        # e.g. PetSchema is in the registry already so it wouldn't work.
        schema = {"schema": "SomeSpecificPetSchema"}
        if spec.openapi_version.major >= 3:
            schema = {"content": {"application/json": schema}}
        spec.path("/pet/{petId}", operations={"get": {"responses": {"200": schema}}})
        resp = get_paths(spec)["/pet/{petId}"]["get"]["responses"]["200"]
        if spec.openapi_version.major < 3:
            schema = resp["schema"]
        else:
            schema = resp["content"]["application/json"]["schema"]
        assert schema == build_ref(spec, "schema", "SomeSpecificPetSchema")
TestOperationHelper
python
redis__redis-py
redis/commands/search/reducers.py
{ "start": 1524, "end": 1798 }
class ____(FieldOnlyReducer):
    """
    Calculate the number of distinct values contained in all the results in
    the group for the given field
    """

    NAME = "COUNT_DISTINCT"

    def __init__(self, field: str) -> None:
        super().__init__(field)
count_distinct
python
Netflix__metaflow
metaflow/plugins/debug_logger.py
{ "start": 115, "end": 265 }
class ____(NullEventLogger):
    TYPE = "debugLogger"

    @classmethod
    def get_worker(cls):
        return DebugEventLoggerSidecar
DebugEventLogger
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/module_setenv_raw/package.py
{ "start": 217, "end": 555 }
class ____(Package):
    homepage = "http://www.spack.llnl.gov"
    url = "http://www.spack.llnl.gov/module-setenv-raw-1.0.tar.gz"

    version("1.0", "0123456789abcdef0123456789abcdef")

    def setup_run_environment(self, env: EnvironmentModifications) -> None:
        env.set("FOO", "{{name}}, {name}, {{}}, {}", raw=True)
ModuleSetenvRaw
python
automl__auto-sklearn
test/test_pipeline/components/regression/test_support_vector_regression.py
{ "start": 160, "end": 722 }
class ____(BaseRegressionComponentTest):
    __test__ = True

    res = dict()
    res["default_boston"] = -0.030006883949312613
    res["default_boston_iterative"] = None
    res["default_boston_sparse"] = -0.062749211736050192
    res["default_boston_iterative_sparse"] = None
    res["default_diabetes"] = 0.12849591861430087
    res["default_diabetes_iterative"] = None
    res["default_diabetes_sparse"] = 0.0098877566961463881
    res["default_diabetes_iterative_sparse"] = None

    sk_mod = sklearn.svm.SVR
    module = LibSVM_SVR
SupportVectorComponentTest
python
run-llama__llama_index
llama-index-core/llama_index/core/base/base_selector.py
{ "start": 531, "end": 1973 }
class ____(BaseModel):
    """A multi-selection of choices."""

    selections: List[SingleSelection]

    @property
    def ind(self) -> int:
        if len(self.selections) != 1:
            raise ValueError(
                f"There are {len(self.selections)} selections, please use .inds."
            )
        return self.selections[0].index

    @property
    def reason(self) -> str:
        if len(self.reasons) != 1:
            raise ValueError(
                f"There are {len(self.reasons)} selections, please use .reasons."
            )
        return self.selections[0].reason

    @property
    def inds(self) -> List[int]:
        return [x.index for x in self.selections]

    @property
    def reasons(self) -> List[str]:
        return [x.reason for x in self.selections]


# separate name for clarity and to not confuse function calling model
SelectorResult = MultiSelection


def _wrap_choice(choice: MetadataType) -> ToolMetadata:
    if isinstance(choice, ToolMetadata):
        return choice
    elif isinstance(choice, str):
        return ToolMetadata(description=choice)
    else:
        raise ValueError(f"Unexpected type: {type(choice)}")


def _wrap_query(query: QueryType) -> QueryBundle:
    if isinstance(query, QueryBundle):
        return query
    elif isinstance(query, str):
        return QueryBundle(query_str=query)
    else:
        raise ValueError(f"Unexpected type: {type(query)}")
MultiSelection
python
django__django
django/db/models/fields/related_lookups.py
{ "start": 6036, "end": 6094 }
class ____(RelatedLookupMixin, IsNull):
    pass
RelatedIsNull
python
pytorch__pytorch
torchgen/model.py
{ "start": 78745, "end": 79251 }
class ____(Type):
    elem: Type

    def __str__(self) -> str:
        return f"{self.elem}?"

    def is_base_ty_like(self, base_ty: BaseTy) -> bool:
        return self.elem.is_base_ty_like(base_ty)

    def is_symint_like(self) -> bool:
        return self.elem.is_symint_like()

    def is_nullable(self) -> bool:
        return True

    def is_list_like(self) -> ListType | None:
        return self.elem.is_list_like()


# A type representing a PyTorch custom class
@dataclass(frozen=True)
OptionalType
python
numba__numba
numba/core/callconv.py
{ "start": 1530, "end": 5525 }
class ____(object):

    def __init__(self, context):
        self.context = context

    def return_optional_value(self, builder, retty, valty, value):
        if valty == types.none:
            # Value is none
            self.return_native_none(builder)

        elif retty == valty:
            # Value is an optional, need a runtime switch
            optval = self.context.make_helper(builder, retty, value=value)
            validbit = cgutils.as_bool_bit(builder, optval.valid)
            with builder.if_then(validbit):
                retval = self.context.get_return_value(builder, retty.type,
                                                       optval.data)
                self.return_value(builder, retval)
            self.return_native_none(builder)

        elif not isinstance(valty, types.Optional):
            # Value is not an optional, need a cast
            if valty != retty.type:
                value = self.context.cast(builder, value, fromty=valty,
                                          toty=retty.type)
            retval = self.context.get_return_value(builder, retty.type, value)
            self.return_value(builder, retval)

        else:
            raise NotImplementedError("returning {0} for {1}".format(valty, retty))

    def return_native_none(self, builder):
        self._return_errcode_raw(builder, RETCODE_NONE)

    def return_exc(self, builder):
        self._return_errcode_raw(builder, RETCODE_EXC)

    def return_stop_iteration(self, builder):
        self._return_errcode_raw(builder, RETCODE_STOPIT)

    def get_return_type(self, ty):
        """
        Get the actual type of the return argument for Numba type *ty*.
        """
        restype = self.context.data_model_manager[ty].get_return_type()
        return restype.as_pointer()

    def init_call_helper(self, builder):
        """
        Initialize and return a call helper object for the given builder.
        """
        ch = self._make_call_helper(builder)
        builder.__call_helper = ch
        return ch

    def _get_call_helper(self, builder):
        return builder.__call_helper

    def unpack_exception(self, builder, pyapi, status):
        return pyapi.unserialize(status.excinfoptr)

    def raise_error(self, builder, pyapi, status):
        """
        Given a non-ok *status*, raise the corresponding Python exception.
        """
        bbend = builder.function.append_basic_block()

        with builder.if_then(status.is_user_exc):
            # Unserialize user exception.
            # Make sure another error may not interfere.
            pyapi.err_clear()
            exc = self.unpack_exception(builder, pyapi, status)
            with cgutils.if_likely(builder, cgutils.is_not_null(builder, exc)):
                pyapi.raise_object(exc)  # steals ref
            builder.branch(bbend)

        with builder.if_then(status.is_stop_iteration):
            pyapi.err_set_none("PyExc_StopIteration")
            builder.branch(bbend)

        with builder.if_then(status.is_python_exc):
            # Error already raised => nothing to do
            builder.branch(bbend)

        pyapi.err_set_string("PyExc_SystemError",
                             "unknown error when calling native function")
        builder.branch(bbend)

        builder.position_at_end(bbend)

    def decode_arguments(self, builder, argtypes, func):
        """
        Get the decoded (unpacked) Python arguments with *argtypes*
        from LLVM function *func*. A tuple of LLVM values is returned.
        """
        raw_args = self.get_arguments(func)
        arginfo = self._get_arg_packer(argtypes)
        return arginfo.from_arguments(builder, raw_args)

    def _get_arg_packer(self, argtypes):
        """
        Get an argument packer for the given argument types.
        """
        return self.context.get_arg_packer(argtypes)
BaseCallConv
python
neetcode-gh__leetcode
python/0023-merge-k-sorted-lists.py
{ "start": 151, "end": 1030 }
class ____:
    def mergeKLists(self, lists: List[ListNode]) -> ListNode:
        if not lists or len(lists) == 0:
            return None

        while len(lists) > 1:
            mergedLists = []
            for i in range(0, len(lists), 2):
                l1 = lists[i]
                l2 = lists[i + 1] if (i + 1) < len(lists) else None
                mergedLists.append(self.mergeList(l1, l2))
            lists = mergedLists
        return lists[0]

    def mergeList(self, l1, l2):
        dummy = ListNode()
        tail = dummy

        while l1 and l2:
            if l1.val < l2.val:
                tail.next = l1
                l1 = l1.next
            else:
                tail.next = l2
                l2 = l2.next
            tail = tail.next
        if l1:
            tail.next = l1
        if l2:
            tail.next = l2
        return dummy.next
Solution
python
getsentry__sentry
src/sentry/preprod/pull_request/types.py
{ "start": 1207, "end": 1980 }
class ____(BaseModel):
    """
    Represents pull request details, normalized across SCM providers.
    """

    id: str | None = None  # Provider-specific ID
    number: int  # PR/MR number
    title: str | None = None
    description: str | None = None
    state: PullRequestState
    author: PullRequestAuthor
    source_branch: str | None = None
    target_branch: str | None = None
    created_at: datetime | None = None
    updated_at: datetime | None = None
    merged_at: datetime | None = None
    closed_at: datetime | None = None
    url: str | None = None  # Provider URL to the PR
    commits_count: int
    additions: int  # Total additions across all files
    deletions: int  # Total deletions across all files
    changed_files_count: int
PullRequestDetails
python
PrefectHQ__prefect
src/prefect/server/schemas/filters.py
{ "start": 45926, "end": 46535 }
class ____(PrefectOperatorFilterBaseModel):
    """Filter for deployments. Only deployments matching all criteria will be returned."""

    active: Optional[DeploymentScheduleFilterActive] = Field(
        default=None, description="Filter criteria for `DeploymentSchedule.active`"
    )

    def _get_filter_list(
        self, db: "PrefectDBInterface"
    ) -> Iterable[sa.ColumnExpressionArgument[bool]]:
        filters: list[sa.ColumnExpressionArgument[bool]] = []
        if self.active is not None:
            filters.append(self.active.as_sql_filter())
        return filters
DeploymentScheduleFilter
python
encode__django-rest-framework
tests/test_filters.py
{ "start": 32133, "end": 32333 }
class ____(serializers.ModelSerializer):
    username = serializers.CharField()

    class Meta:
        model = SensitiveOrderingFilterModel
        fields = ('id', 'username')
SensitiveDataSerializer1
python
huggingface__transformers
tests/models/seggpt/test_image_processing_seggpt.py
{ "start": 1130, "end": 3543 }
class ____:
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }

    def expected_output_image_shape(self, images):
        return self.num_channels, self.size["height"], self.size["width"]

    def expected_post_processed_shape(self):
        return self.size["height"] // 2, self.size["width"]

    def get_fake_image_segmentation_output(self):
        torch.manual_seed(42)
        return SegGptImageSegmentationOutput(
            pred_masks=torch.rand(self.batch_size, self.num_channels, self.size["height"], self.size["width"])
        )

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


def prepare_mask():
    ds = load_dataset("EduardoPacheco/seggpt-example-data")["train"]
    return ds[0]["mask"].convert("L")


def prepare_img():
    ds = load_dataset("EduardoPacheco/seggpt-example-data")["train"]
    images = [image.convert("RGB") for image in ds["image"]]
    masks = [image.convert("RGB") for image in ds["mask"]]
    return images, masks


@require_torch
@require_vision
SegGptImageProcessingTester
python
huggingface__transformers
tests/models/vitmatte/test_image_processing_vitmatte.py
{ "start": 3124, "end": 17672 }
class ____(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = VitMatteImageProcessor if is_vision_available() else None
    fast_image_processing_class = VitMatteImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = VitMatteImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processing, "image_mean"))
            self.assertTrue(hasattr(image_processing, "image_std"))
            self.assertTrue(hasattr(image_processing, "do_normalize"))
            self.assertTrue(hasattr(image_processing, "do_rescale"))
            self.assertTrue(hasattr(image_processing, "rescale_factor"))
            self.assertTrue(hasattr(image_processing, "do_pad"))
            self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_call_numpy(self):
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (image processor does not support batched inputs)
        image = image_inputs[0]
        trimap = np.random.randint(0, 3, size=image.shape[:2])

        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            encoded_images = image_processing(images=image, trimaps=trimap, return_tensors="pt").pixel_values

            # Verify that width and height can be divided by size_divisibility and that correct dimensions got merged
            self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
            self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
            self.assertTrue(encoded_images.shape[-3] == 4)

    def test_call_pytorch(self):
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (image processor does not support batched inputs)
        image = image_inputs[0]
        trimap = np.random.randint(0, 3, size=image.shape[1:])

        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            encoded_images = image_processing(images=image, trimaps=trimap, return_tensors="pt").pixel_values

            # Verify that width and height can be divided by size_divisibility and that correct dimensions got merged
            self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
            self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
            self.assertTrue(encoded_images.shape[-3] == 4)

        # create batched tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)
        image_input = torch.stack(image_inputs, dim=0)
        self.assertIsInstance(image_input, torch.Tensor)
        self.assertTrue(image_input.shape[1] == 3)

        trimap_shape = [image_input.shape[0]] + [1] + list(image_input.shape)[2:]
        trimap_input = torch.randint(0, 3, trimap_shape, dtype=torch.uint8)
        self.assertIsInstance(trimap_input, torch.Tensor)
        self.assertTrue(trimap_input.shape[1] == 1)

        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            encoded_images = image_processing(images=image, trimaps=trimap, return_tensors="pt").pixel_values

            # Verify that width and height can be divided by size_divisibility and that correct dimensions got merged
            self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
            self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
            self.assertTrue(encoded_images.shape[-3] == 4)

    def test_call_pil(self):
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (image processor does not support batched inputs)
        image = image_inputs[0]
        trimap = np.random.randint(0, 3, size=image.size[::-1])

        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            encoded_images = image_processing(images=image, trimaps=trimap, return_tensors="pt").pixel_values

            # Verify that width and height can be divided by size_divisibility and that correct dimensions got merged
            self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
            self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
            self.assertTrue(encoded_images.shape[-3] == 4)

    def test_call_numpy_4_channels(self):
        # Test that can process images which have an arbitrary number of channels
        # create random numpy tensors
        self.image_processor_tester.num_channels = 4
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)

        # Test not batched input (image processor does not support batched inputs)
        image = image_inputs[0]
        trimap = np.random.randint(0, 3, size=image.shape[:2])

        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class(**self.image_processor_dict)
            encoded_images = image_processor(
                images=image,
                trimaps=trimap,
                input_data_format="channels_last",
                image_mean=(0.0, 0.0, 0.0, 0.0),
                image_std=(1.0, 1.0, 1.0, 1.0),
                return_tensors="pt",
            ).pixel_values

            # Verify that width and height can be divided by size_divisibility and that correct dimensions got merged
            self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
            self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
            self.assertTrue(encoded_images.shape[-3] == 5)

    def test_padding_slow(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image = np.random.randn(3, 249, 491)
        images = image_processing.pad_image(image)
        assert images.shape == (3, 256, 512)

        image = np.random.randn(3, 249, 512)
        images = image_processing.pad_image(image)
        assert images.shape == (3, 256, 512)

    def test_padding_fast(self):
        # extra test because name is different for fast image processor
        image_processing = self.fast_image_processing_class(**self.image_processor_dict)
        image = torch.rand(3, 249, 491)
        images = image_processing._pad_image(image)
        assert images.shape == (3, 256, 512)

        image = torch.rand(3, 249, 512)
        images = image_processing._pad_image(image)
        assert images.shape == (3, 256, 512)

    def test_image_processor_preprocess_arguments(self):
        # vitmatte require additional trimap input for image_processor
        # that is why we override original common test
        for i, image_processing_class in enumerate(self.image_processor_list):
            image_processor = image_processing_class(**self.image_processor_dict)
            image = self.image_processor_tester.prepare_image_inputs()[0]
            trimap = np.random.randint(0, 3, size=image.size[::-1])

            # Type validation will fail for fast processors only (for now)
            if image_processing_class.__name__.endswith("Fast"):
                with self.assertRaises(TypeError):
                    image_processor(image, trimaps=trimap, extra_argument=True)
            else:
                # Else we just consume extra kwargs and raise a warning
                with warnings.catch_warnings(record=True) as raised_warnings:
                    warnings.simplefilter("always")
                    image_processor(image, trimaps=trimap, extra_argument=True)
                messages = " ".join([str(w.message) for w in raised_warnings])
                self.assertGreaterEqual(len(raised_warnings), 1)
                self.assertIn("extra_argument", messages)

    @unittest.skip(reason="Many failing cases. This test needs a more deep investigation.")
    def test_fast_is_faster_than_slow(self):
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping speed test")

        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping speed test as one of the image processors is not defined")

        def measure_time(image_processor, images, trimaps):
            # Warmup
            for _ in range(5):
                _ = image_processor(images, trimaps=trimaps, return_tensors="pt")
            all_times = []
            for _ in range(10):
                start = time.time()
                _ = image_processor(images, trimaps=trimaps, return_tensors="pt")
                all_times.append(time.time() - start)
            # Take the average of the fastest 3 runs
            avg_time = sum(sorted(all_times[:3])) / 3.0
            return avg_time

        dummy_images = torch.randint(0, 255, (4, 3, 400, 800), dtype=torch.uint8)
        dummy_trimaps = torch.randint(0, 3, (4, 400, 800), dtype=torch.uint8)
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

        fast_time = measure_time(image_processor_fast, dummy_images, dummy_trimaps)
        slow_time = measure_time(image_processor_slow, dummy_images, dummy_trimaps)

        self.assertLessEqual(fast_time, slow_time)

    def test_slow_fast_equivalence(self):
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")

        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")

        dummy_image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
        dummy_trimap = np.random.randint(0, 3, size=dummy_image.size[::-1])
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

        encoding_slow = image_processor_slow(dummy_image, trimaps=dummy_trimap, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_image, trimaps=dummy_trimap, return_tensors="pt")

        self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)

    def test_slow_fast_equivalence_batched(self):
        # this only checks on equal resolution, since the slow processor doesn't work otherwise
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")

        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")

        if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
            self.skipTest(
                reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
            )

        dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)
        dummy_trimaps = [np.random.randint(0, 3, size=image.shape[1:]) for image in dummy_images]
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

        encoding_slow = image_processor_slow(dummy_images, trimaps=dummy_trimaps, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_images, trimaps=dummy_trimaps, return_tensors="pt")

        self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)

    @slow
    @require_torch_accelerator
    @require_vision
    @pytest.mark.torch_compile_test
    def test_can_compile_fast_image_processor(self):
        # override as trimaps are needed for the image processor
        if self.fast_image_processing_class is None:
            self.skipTest("Skipping compilation test as fast image processor is not defined")

        if version.parse(torch.__version__) < version.parse("2.3"):
            self.skipTest(reason="This test requires torch >= 2.3 to run.")

        torch.compiler.reset()
        input_image = torch.randint(0, 255, (3, 224, 224), dtype=torch.uint8)
        dummy_trimap = np.random.randint(0, 3, size=input_image.shape[1:])
        image_processor = self.fast_image_processing_class(**self.image_processor_dict)
        output_eager = image_processor(input_image, dummy_trimap, device=torch_device, return_tensors="pt")

        image_processor = torch.compile(image_processor, mode="reduce-overhead")
        output_compiled = image_processor(input_image, dummy_trimap, device=torch_device, return_tensors="pt")

        torch.testing.assert_close(output_eager.pixel_values, output_compiled.pixel_values, rtol=1e-4, atol=1e-4)
VitMatteImageProcessingTest
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/operators/test_eks.py
{ "start": 19126, "end": 23536 }
class ____:
    def setup_method(self) -> None:
        self.create_nodegroup_params = CreateNodegroupParams(
            cluster_name=CLUSTER_NAME,
            nodegroup_name=NODEGROUP_NAME,
            nodegroup_subnets=SUBNET_IDS,
            nodegroup_role_arn=NODEROLE_ARN[1],
        )

    @pytest.mark.parametrize(
        "create_nodegroup_kwargs",
        [
            pytest.param(None, id="without nodegroup kwargs"),
            pytest.param(CREATE_NODEGROUP_KWARGS, id="with nodegroup kwargs"),
        ],
    )
    @mock.patch.object(Waiter, "wait")
    @mock.patch.object(EksHook, "create_nodegroup")
    def test_execute_when_nodegroup_does_not_already_exist(
        self, mock_create_nodegroup, mock_waiter, create_nodegroup_kwargs
    ):
        op_kwargs = {**self.create_nodegroup_params}
        if create_nodegroup_kwargs:
            op_kwargs["create_nodegroup_kwargs"] = create_nodegroup_kwargs
            parameters = {**self.create_nodegroup_params, **create_nodegroup_kwargs}
        else:
            assert "create_nodegroup_params" not in op_kwargs
            parameters = self.create_nodegroup_params

        operator = EksCreateNodegroupOperator(task_id=TASK_ID, **op_kwargs)
        operator.execute({})
        mock_create_nodegroup.assert_called_with(**convert_keys(parameters))
        mock_waiter.assert_not_called()

    @pytest.mark.parametrize(
        "create_nodegroup_kwargs",
        [
            pytest.param(None, id="without nodegroup kwargs"),
            pytest.param(CREATE_NODEGROUP_KWARGS, id="with nodegroup kwargs"),
        ],
    )
    @mock.patch.object(Waiter, "wait")
    @mock.patch.object(EksHook, "create_nodegroup")
    def test_execute_with_wait_when_nodegroup_does_not_already_exist(
        self, mock_create_nodegroup, mock_waiter, create_nodegroup_kwargs
    ):
        op_kwargs = {**self.create_nodegroup_params}
        if create_nodegroup_kwargs:
            op_kwargs["create_nodegroup_kwargs"] = create_nodegroup_kwargs
            parameters = {**self.create_nodegroup_params, **create_nodegroup_kwargs}
        else:
            assert "create_nodegroup_params" not in op_kwargs
            parameters = self.create_nodegroup_params

        operator = EksCreateNodegroupOperator(task_id=TASK_ID, **op_kwargs, wait_for_completion=True)
        operator.execute({})
        mock_create_nodegroup.assert_called_with(**convert_keys(parameters))
        mock_waiter.assert_called_with(mock.ANY, clusterName=CLUSTER_NAME, nodegroupName=NODEGROUP_NAME)
        assert_expected_waiter_type(mock_waiter, "NodegroupActive")

    @mock.patch.object(EksHook, "create_nodegroup")
    def test_create_nodegroup_deferrable(self, mock_create_nodegroup):
        mock_create_nodegroup.return_value = True
        op_kwargs = {**self.create_nodegroup_params}
        operator = EksCreateNodegroupOperator(
            task_id=TASK_ID,
            **op_kwargs,
            deferrable=True,
        )
        with pytest.raises(TaskDeferred) as exc:
            operator.execute({})
        assert isinstance(exc.value.trigger, EksCreateNodegroupTrigger)

    def test_create_nodegroup_deferrable_versus_wait_for_completion(self):
        op_kwargs = {**self.create_nodegroup_params}
        operator = EksCreateNodegroupOperator(
            task_id=TASK_ID,
            **op_kwargs,
            deferrable=True,
            wait_for_completion=True,
        )
        assert operator.wait_for_completion is False
        operator = EksCreateNodegroupOperator(
            task_id=TASK_ID,
            **op_kwargs,
            deferrable=False,
            wait_for_completion=True,
        )
        assert operator.wait_for_completion is True

    def test_template_fields(self):
        op_kwargs = {**self.create_nodegroup_params}
        op = EksCreateNodegroupOperator(task_id=TASK_ID, **op_kwargs)
        validate_template_fields(op)

    def test_init_with_region(self):
        op_kwargs = {**self.create_nodegroup_params}
        with pytest.warns(AirflowProviderDeprecationWarning) as m:
            m.operator = EksCreateNodegroupOperator(
                task_id=TASK_ID,
                **op_kwargs,
                region="us-east-2",
            )
        assert m.operator.region_name == "us-east-2"
TestEksCreateNodegroupOperator
python
google__jax
jax/_src/error_check.py
{ "start": 2561, "end": 8774 }
class ____: """Redefine the internal error state based on the mesh in the context. When using JAX in multi-device environments in explicit mode, error tracking needs to be properly aligned with the device mesh. This context manager ensures that the internal error state is correctly initialized based on the current mesh configuration. This context manager should be used when starting a multi-device computation, or when switching between different device meshes. On entering the context, it initializes a new error state based on the mesh in the context. On exiting the context, it restores the previous error state. """ __slots__ = ("old_ref",) def __init__(self): self.old_ref = None def __enter__(self): self.old_ref = _error_storage.ref with core.eval_context(): _initialize_error_code_ref() return self def __exit__(self, exc_type, exc_value, traceback): _error_storage.ref = self.old_ref def set_error_if(pred: Array, /, msg: str) -> None: """Set the internal error state if any element of `pred` is `True`. This function is used inside JAX computations to detect runtime errors without immediately halting execution. When this function is traced (e.g., inside :func:`jax.jit`), the corresponding error message and its traceback are recorded. At execution time, if `pred` contains any `True` values, the error state is set, but execution continues without interruption. The recorded error can later be raised using :func:`raise_if_error`. If the error state has already been set, subsequent errors are ignored and will not override the existing error. For multi-device environments, in explicit mode, users must call :func:`error_checking_context` to initialize a new error tracking state that matches the device mesh. In auto mode, implicit cross-device communication may occur inside this function, which could impact performance. A warning is issued in such cases. When exporting a function with `jax.export`, error checking must be explicitly wrapped using :func:`wrap_for_export` before export and :func:`unwrap_from_import` after import. Args: pred: A JAX boolean array. If any element of `pred` is `True`, the internal error state will be set. msg: The corresponding error message to be raised later. """ # TODO(jakevdp): remove this import and express the following using lax APIs. import jax.numpy as jnp # pytype: disable=import-error if _error_storage.ref is None: with core.eval_context(): _initialize_error_code_ref() assert _error_storage.ref is not None # Get the traceback. traceback = source_info_util.current().traceback assert traceback is not None traceback = traceback.as_python_traceback() assert isinstance(traceback, TracebackType) traceback = traceback_util.filter_traceback(traceback) assert isinstance(traceback, TracebackType) with _error_list_lock: new_error_code = np.uint32(len(_error_list)) _error_list.append((msg, traceback)) out_sharding = core.typeof(_error_storage.ref).sharding in_sharding: NamedSharding = core.typeof(pred).sharding # Reduce `pred`. if all(dim is None for dim in out_sharding.spec): # single-device case. pred = pred.any() else: # multi-device case. has_auto_axes = mesh_lib.AxisType.Auto in in_sharding.mesh.axis_types if has_auto_axes: # auto mode. warnings.warn( "When at least one mesh axis of `pred` is in auto mode, calling" " `set_error_if` will cause implicit communication between devices." " To avoid this, consider converting the mesh axis in auto mode to" " explicit mode.", RuntimeWarning, ) pred = pred.any() # reduce to a single scalar else: # explicit mode. 
if out_sharding.mesh != in_sharding.mesh: raise ValueError( "The error code state and the predicate must be on the same mesh, " f"but got {out_sharding.mesh} and {in_sharding.mesh} respectively. " "Please use `with error_checking_context()` to redefine the error " "code state based on the mesh." ) pred = shard_map.shard_map( partial(jnp.any, keepdims=True), mesh=out_sharding.mesh, in_specs=in_sharding.spec, out_specs=out_sharding.spec, )(pred) # perform per-device reduction error_code = _error_storage.ref[...] should_update = jnp.logical_and(error_code == jnp.uint32(_NO_ERROR), pred) error_code = jnp.where(should_update, new_error_code, error_code) # TODO(ayx): support vmap and shard_map. _error_storage.ref[...] = error_code def raise_if_error() -> None: """Raise an exception if the internal error state is set. This function should be called after a computation completes to check for any errors that were marked during execution via `set_error_if()`. If an error exists, it raises a `JaxValueError` with the corresponding error message. This function should not be called inside a traced function (e.g., inside :func:`jax.jit`). Doing so will raise a `ValueError`. Raises: JaxValueError: If the internal error state is set. ValueError: If called within a traced JAX function. """ if _error_storage.ref is None: # if not initialized, do nothing return error_code = _error_storage.ref[...].min() # reduce to a single error code if isinstance(error_code, core.Tracer): raise ValueError( "raise_if_error() should not be called within a traced context, such as" " within a jitted function." ) if error_code == np.uint32(_NO_ERROR): return _error_storage.ref[...] = lax.full( _error_storage.ref.shape, np.uint32(_NO_ERROR), sharding=_error_storage.ref.sharding, ) # clear the error code with _error_list_lock: msg, traceback = _error_list[error_code] if isinstance(traceback, str): # from imported AOT functions exc = JaxValueError( f"{msg}\nThe original traceback is shown below:\n{traceback}" ) raise exc else: exc = JaxValueError(msg) raise exc.with_traceback(traceback) @dataclasses.dataclass(frozen=True)
error_checking_context
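A minimal usage sketch for the error-checking API documented above. The import path is an assumption (the module shown lives at jax/_src/error_check.py, which is a private namespace), and safe_log is a hypothetical function; the call pattern follows the set_error_if and raise_if_error docstrings.

# Sketch only: the import path is assumed; jax._src is a private namespace.
import jax
import jax.numpy as jnp
from jax._src import error_check  # assumption: private module, not a public API

@jax.jit
def safe_log(x):
    # Record (but do not raise) an error if any element is non-positive.
    error_check.set_error_if(x <= 0, "safe_log: input must be positive")
    return jnp.log(x)

out = safe_log(jnp.array([1.0, -2.0]))  # execution continues uninterrupted
error_check.raise_if_error()            # raises JaxValueError here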
python
ansible__ansible
lib/ansible/errors/__init__.py
{ "start": 8123, "end": 8565 }
class ____(AnsibleRuntimeError, _error_utils.ContributesToTaskResult): """ The transport / connection_plugin had a fatal error. This exception provides a result dictionary via the ContributesToTaskResult mixin. """ @property def result_contribution(self) -> t.Mapping[str, object]: return dict(unreachable=True) @property def omit_failed_key(self) -> bool: return True
AnsibleConnectionFailure
python
sphinx-doc__sphinx
sphinx/ext/autosummary/__init__.py
{ "start": 3814, "end": 5743 }
class ____(nodes.comment): pass def autosummary_table_visit_html( self: HTML5Translator, node: autosummary_table ) -> None: """Make the first column of the table non-breaking.""" try: table = cast('nodes.table', node[0]) tgroup = cast('nodes.tgroup', table[0]) tbody = cast('nodes.tbody', tgroup[-1]) rows = cast('list[nodes.row]', tbody) for row in rows: col1_entry = cast('nodes.entry', row[0]) par = cast('nodes.paragraph', col1_entry[0]) for j, subnode in enumerate(list(par)): if isinstance(subnode, nodes.Text): new_text = subnode.astext().replace(' ', '\u00a0') par[j] = nodes.Text(new_text) except IndexError: pass # -- autodoc integration ------------------------------------------------------- def _get_documenter(obj: Any, parent: Any) -> _AutodocObjType: """Get the best object type suitable for documenting the given object. *obj* is the Python object to be documented, and *parent* is another Python object (e.g. a module or a class) to which *obj* belongs. """ if inspect.ismodule(obj): return 'module' if parent is None or inspect.ismodule(parent): parent_obj_type = 'module' else: parent_opt = _best_object_type_for_member( member=parent, member_name='', is_attr=False, parent_obj_type='module', parent_props=None, ) parent_obj_type = parent_opt if parent_opt is not None else 'data' if obj_type := _best_object_type_for_member( member=obj, member_name='', is_attr=False, parent_obj_type=parent_obj_type, parent_props=None, ): return obj_type return 'data' # -- .. autosummary:: ----------------------------------------------------------
autosummary_table
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_dataplex.py
{ "start": 20940, "end": 21939 }
class ____: @mock.patch(HOOK_STR) def test_execute(self, hook_mock): op = DataplexDeleteAssetOperator( task_id=TASK_ID, project_id=PROJECT_ID, region=REGION, lake_id=LAKE_ID, zone_id=ZONE_ID, asset_id=ASSET_ID, api_version=API_VERSION, gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, ) op.execute(context=mock.MagicMock()) hook_mock.assert_called_once_with( gcp_conn_id=GCP_CONN_ID, api_version=API_VERSION, impersonation_chain=IMPERSONATION_CHAIN, ) hook_mock.return_value.delete_asset.assert_called_once_with( project_id=PROJECT_ID, region=REGION, lake_id=LAKE_ID, zone_id=ZONE_ID, asset_id=ASSET_ID, retry=DEFAULT, timeout=None, metadata=(), )
TestDataplexDeleteAssetOperator
python
huggingface__transformers
src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
{ "start": 14835, "end": 15358 }
class ____(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states
RobertaPreLayerNormSelfOutput
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1031164, "end": 1031723 }
class ____(sgqlc.types.Type): """Autogenerated return type of UpdateIpAllowListForInstalledAppsEnabledSetting """ __schema__ = github_schema __field_names__ = ("client_mutation_id", "owner") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation.""" owner = sgqlc.types.Field("IpAllowListOwner", graphql_name="owner") """The IP allow list owner on which the setting was updated."""
UpdateIpAllowListForInstalledAppsEnabledSettingPayload
python
spyder-ide__spyder
external-deps/python-lsp-server/pylsp/lsp.py
{ "start": 674, "end": 745 }
class ____: Text = 1 Read = 2 Write = 3
DocumentHighlightKind
python
django__django
tests/fixtures_regress/models.py
{ "start": 8423, "end": 8639 }
class ____(BaseNKModel): def natural_key(self): return (self.data,) # Fake the dependency for a circularity natural_key.dependencies = ["fixtures_regress.M2MComplexCircular2A"]
M2MComplexCircular2B
python
ray-project__ray
python/ray/train/huggingface/transformers/_transformers_utils.py
{ "start": 3431, "end": 5861 }
class ____(IterableDataset): """Wrapper class for ray data iterables.""" def __init__(self, data_iterable) -> None: super().__init__() self.data_iterable = data_iterable def __iter__(self) -> Iterator: return iter(self.data_iterable) @PublicAPI(stability="beta") def prepare_trainer(trainer: "Trainer") -> "Trainer": """Prepare your HuggingFace Transformer Trainer for Ray Train. This utility function enables the trainer to integrate with Ray Data. Internally, it overrides the `get_train_dataloader` and `get_eval_dataloader` methods and injects the data integration logic if the `train_dataset` and `eval_dataset` are Ray Data iterables. """ if TRANSFORMERS_IMPORT_ERROR is not None: raise TRANSFORMERS_IMPORT_ERROR base_trainer_class: Type[transformers.trainer.Trainer] = trainer.__class__ class RayTransformersTrainer(base_trainer_class): """A wrapper of `transformers.Trainer` for Ray Data integration.""" def get_train_dataloader(self) -> DataLoader: if isinstance(self.train_dataset, _IterableFromIterator): dataset = RayTorchIterableDataset(self.train_dataset) return DataLoader(dataset, batch_size=1, collate_fn=lambda x: x[0]) else: return super().get_train_dataloader() def get_eval_dataloader( self, eval_dataset: Optional[Union[str, Dataset]] = None ) -> DataLoader: if eval_dataset is None: eval_dataset = self.eval_dataset if ( isinstance(eval_dataset, str) and isinstance(self.eval_dataset, dict) and isinstance(self.eval_dataset[eval_dataset], _IterableFromIterator) ): dataset = RayTorchIterableDataset(self.eval_dataset[eval_dataset]) return DataLoader(dataset, batch_size=1, collate_fn=lambda x: x[0]) elif isinstance(eval_dataset, _IterableFromIterator): dataset = RayTorchIterableDataset(eval_dataset) return DataLoader(dataset, batch_size=1, collate_fn=lambda x: x[0]) else: return super().get_eval_dataloader(eval_dataset) trainer.__class__ = RayTransformersTrainer record_extra_usage_tag(TagKey.TRAIN_TRANSFORMERS_PREPARE_TRAINER, "1") return trainer
RayTorchIterableDataset
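A short sketch of where prepare_trainer sits in a Ray Train worker function, following the docstring above; build_hf_trainer is a hypothetical helper standing in for ordinary transformers.Trainer construction.

# Sketch: runs inside a Ray Train worker; trainer construction is elided.
from ray.train.huggingface.transformers import prepare_trainer

def train_func(config):
    # Hypothetical helper: returns a transformers.Trainer whose train/eval
    # datasets are Ray Data iterables (e.g. from ray.train.get_dataset_shard).
    trainer = build_hf_trainer(config)
    trainer = prepare_trainer(trainer)  # patches the dataloader methods as above
    trainer.train()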
python
catalyst-team__catalyst
catalyst/contrib/datasets/cifar.py
{ "start": 1565, "end": 3554 }
class ____(data.Dataset): _repr_indent = 4 def __init__( self, root: str, transforms: Optional[Callable] = None, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, ) -> None: if isinstance(root, torch._six.string_classes): root = os.path.expanduser(root) self.root = root has_transforms = transforms is not None has_separate_transform = transform is not None or target_transform is not None if has_transforms and has_separate_transform: raise ValueError( "Only transforms or transform/target_transform can " "be passed as argument" ) # for backwards-compatibility self.transform = transform self.target_transform = target_transform if has_separate_transform: transforms = StandardTransform(transform, target_transform) self.transforms = transforms def __getitem__(self, index: int) -> Any: raise NotImplementedError def __len__(self) -> int: raise NotImplementedError def __repr__(self) -> str: head = "Dataset " + self.__class__.__name__ body = ["Number of datapoints: {}".format(self.__len__())] if self.root is not None: body.append("Root location: {}".format(self.root)) body += self.extra_repr().splitlines() if hasattr(self, "transforms") and self.transforms is not None: body += [repr(self.transforms)] lines = [head] + [" " * self._repr_indent + line for line in body] return "\n".join(lines) def _format_transform_repr(self, transform: Callable, head: str) -> List[str]: lines = transform.__repr__().splitlines() return ["{}{}".format(head, lines[0])] + [ "{}{}".format(" " * len(head), line) for line in lines[1:] ] def extra_repr(self) -> str: return ""
VisionDataset
python
huggingface__transformers
tests/models/cohere2_vision/test_modeling_cohere2_vision.py
{ "start": 5775, "end": 17675 }
class ____(unittest.TestCase): def setUp(self): self.model_checkpoint = "CohereLabs/command-a-vision-07-2025" def tearDown(self): cleanup(torch_device, gc_collect=True) def get_model(self, dummy=True): device_type, major, _ = get_device_properties() dtype = torch.float16 # too large to fit into A10 config = Cohere2VisionConfig.from_pretrained(self.model_checkpoint) if dummy: config.text_config.num_hidden_layers = 4 config.text_config.layer_types = config.text_config.layer_types[:4] model = Cohere2VisionForConditionalGeneration.from_pretrained( self.model_checkpoint, config=config, dtype=dtype, device_map="auto", ) return model @slow @require_torch_accelerator def test_model_integration_forward(self): processor = AutoProcessor.from_pretrained(self.model_checkpoint) model = self.get_model(dummy=False) messages = [ { "role": "user", "content": [ {"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"}, {"type": "text", "text": "Please describe the image explicitly."}, ], } ] inputs = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt" ).to(torch_device, dtype=torch.float16) # Forward with torch.inference_mode(): output = model(**inputs) actual_logits = output.logits[0, -1, :5].cpu() EXPECTED_LOGITS = Expectations( { ("xpu", 3): [2.4297, 1.6836, 1.8779, 2.1895, 1.9395], # 4-bit ("cuda", 7): [0.1097, 0.3481, 3.8340, 9.7969, 2.0488], ("cuda", 8): [2.4277, 1.6875, 1.8789, 2.1875, 1.9375], } ) # fmt: skip expected_logits = torch.tensor(EXPECTED_LOGITS.get_expectation(), dtype=torch.float16) self.assertTrue( torch.allclose(actual_logits, expected_logits, atol=0.1), f"Actual logits: {actual_logits}" f"\nExpected logits: {expected_logits}" f"\nDifference: {torch.abs(actual_logits - expected_logits)}", ) @slow @require_torch_accelerator @require_deterministic_for_xpu def test_model_integration_generate_text_only(self): processor = AutoProcessor.from_pretrained(self.model_checkpoint) model = self.get_model() messages = [ { "role": "user", "content": [ {"type": "text", "text": "Write a haiku"}, ], } ] inputs = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt" ).to(torch_device, dtype=torch.float16) with torch.no_grad(): generate_ids = model.generate(**inputs, max_new_tokens=10, do_sample=False) decoded_output = processor.decode( generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True ) expected_outputs = Expectations( { ("xpu", 3): "<|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|>", ("cuda", 8): "<|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|>", } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual(decoded_output, expected_output) @slow @require_torch_accelerator @require_deterministic_for_xpu def test_model_integration_generate_chat_template(self): processor = AutoProcessor.from_pretrained(self.model_checkpoint) model = self.get_model() messages = [ { "role": "user", "content": [ {"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"}, {"type": "text", "text": "Please describe the image explicitly."}, ], } ] inputs = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, 
return_tensors="pt" ).to(torch_device, dtype=torch.float16) with torch.no_grad(): generate_ids = model.generate(**inputs, max_new_tokens=10, do_sample=False) decoded_output = processor.decode( generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True ) expected_outputs = Expectations( { ("xpu", 3): '<|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|>', ("cuda", 8): '<|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|>', } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual(decoded_output, expected_output) @slow @require_torch_accelerator def test_model_integration_batched_generate(self): processor = AutoProcessor.from_pretrained(self.model_checkpoint) model = self.get_model(dummy=False) # Prepare inputs messages = [ [ { "role": "user", "content": [ {"type": "image", "url": "https://llava-vl.github.io/static/images/view.jpg"}, {"type": "text", "text": "Write a haiku for this image"}, ], }, ], [ { "role": "user", "content": [ { "type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg", }, {"type": "text", "text": "Describe this image"}, ], }, ], ] inputs = processor.apply_chat_template( messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt" ).to(model.device, dtype=torch.float16) output = model.generate(**inputs, do_sample=False, max_new_tokens=5) # Check first output decoded_output = processor.decode(output[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True) expected_outputs = Expectations( { ("xpu", 3): 'Dock stretches to calm', ("cuda", 8): 'Dock stretches to calm', } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual( decoded_output, expected_output, f"Decoded output: {decoded_output}\nExpected output: {expected_output}", ) # Check second output decoded_output = processor.decode(output[1, inputs["input_ids"].shape[1] :], skip_special_tokens=True) expected_outputs = Expectations( { ("xpu", 3): 'The image depicts a', ("cuda", 8): 'The image depicts a', } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual( decoded_output, expected_output, f"Decoded output: {decoded_output}\nExpected output: {expected_output}", ) @slow @require_torch_accelerator @require_deterministic_for_xpu def test_model_integration_batched_generate_multi_image(self): processor = AutoProcessor.from_pretrained(self.model_checkpoint) model = self.get_model() # Prepare inputs messages = [ [ { "role": "user", "content": [ {"type": "image", "url": "https://llava-vl.github.io/static/images/view.jpg"}, {"type": "text", "text": "Write a haiku for this image"}, ], }, ], [ { "role": "user", "content": [ { "type": "image", "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg", }, { "type": "image", "url": "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg", }, { "type": "text", "text": "These images depict two different landmarks. 
Can you identify them?", }, ], }, ], ] inputs = processor.apply_chat_template( messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt" ).to(model.device, dtype=torch.float16) output = model.generate(**inputs, do_sample=False, max_new_tokens=10) # Check first output decoded_output = processor.decode(output[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True) # Batching seems to alter the output slightly, but it is also the case in the original implementation. This seems to be expected: https://github.com/huggingface/transformers/issues/23017#issuecomment-1649630232 expected_outputs = Expectations( { ("xpu", 3): '<|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|>', ("cuda", 8): '<|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|>', } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual( decoded_output, expected_output, f"Decoded output: {decoded_output}\nExpected output: {expected_output}", ) # Check second output decoded_output = processor.decode(output[1, inputs["input_ids"].shape[1] :], skip_special_tokens=True) expected_outputs = Expectations( { ("xpu", 3): '<|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|>', ("cuda", 8): '<|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|><|CHATBOT_TOKEN|>', } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual( decoded_output, expected_output, f"Decoded output: {decoded_output}\nExpected output: {expected_output}", )
Cohere2IntegrationTest
python
pytorch__pytorch
torch/cuda/__init__.py
{ "start": 55162, "end": 55711 }
class ____(_LegacyStorage): @classmethod def from_buffer(cls, *args, **kwargs): _warn_typed_storage_removal() raise RuntimeError("from_buffer: Not available for CUDA storage") @classmethod def _new_with_weak_ptr(cls, *args, **kwargs): raise RuntimeError("_new_with_weak_ptr: Not available for CUDA storage") @classmethod def _new_shared_filename(cls, manager, obj, size, *, device=None, dtype=None): raise RuntimeError("_new_shared_filename: Not available for CUDA storage")
_CudaLegacyStorage
python
wandb__wandb
wandb/vendor/pygments/lexers/c_like.py
{ "start": 6453, "end": 10577 }
class ____(RegexLexer): """ For Vala source code with preprocessor directives. .. versionadded:: 1.1 """ name = 'Vala' aliases = ['vala', 'vapi'] filenames = ['*.vala', '*.vapi'] mimetypes = ['text/x-vala'] tokens = { 'whitespace': [ (r'^\s*#if\s+0', Comment.Preproc, 'if0'), (r'\n', Text), (r'\s+', Text), (r'\\\n', Text), # line continuation (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single), (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), ], 'statements': [ (r'[L@]?"', String, 'string'), (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), (r'(?s)""".*?"""', String), # verbatim strings (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex), (r'0[0-7]+[Ll]?', Number.Oct), (r'\d+[Ll]?', Number.Integer), (r'[~!%^&*+=|?:<>/-]', Operator), (r'(\[)(Compact|Immutable|(?:Boolean|Simple)Type)(\])', bygroups(Punctuation, Name.Decorator, Punctuation)), # TODO: "correctly" parse complex code attributes (r'(\[)(CCode|(?:Integer|Floating)Type)', bygroups(Punctuation, Name.Decorator)), (r'[()\[\],.]', Punctuation), (words(( 'as', 'base', 'break', 'case', 'catch', 'construct', 'continue', 'default', 'delete', 'do', 'else', 'enum', 'finally', 'for', 'foreach', 'get', 'if', 'in', 'is', 'lock', 'new', 'out', 'params', 'return', 'set', 'sizeof', 'switch', 'this', 'throw', 'try', 'typeof', 'while', 'yield'), suffix=r'\b'), Keyword), (words(( 'abstract', 'const', 'delegate', 'dynamic', 'ensures', 'extern', 'inline', 'internal', 'override', 'owned', 'private', 'protected', 'public', 'ref', 'requires', 'signal', 'static', 'throws', 'unowned', 'var', 'virtual', 'volatile', 'weak', 'yields'), suffix=r'\b'), Keyword.Declaration), (r'(namespace|using)(\s+)', bygroups(Keyword.Namespace, Text), 'namespace'), (r'(class|errordomain|interface|struct)(\s+)', bygroups(Keyword.Declaration, Text), 'class'), (r'(\.)([a-zA-Z_]\w*)', bygroups(Operator, Name.Attribute)), # void is an actual keyword, others are in glib-2.0.vapi (words(( 'void', 'bool', 'char', 'double', 'float', 'int', 'int8', 'int16', 'int32', 'int64', 'long', 'short', 'size_t', 'ssize_t', 'string', 'time_t', 'uchar', 'uint', 'uint8', 'uint16', 'uint32', 'uint64', 'ulong', 'unichar', 'ushort'), suffix=r'\b'), Keyword.Type), (r'(true|false|null)\b', Name.Builtin), ('[a-zA-Z_]\w*', Name), ], 'root': [ include('whitespace'), default('statement'), ], 'statement': [ include('whitespace'), include('statements'), ('[{}]', Punctuation), (';', Punctuation, '#pop'), ], 'string': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), (r'[^\\"\n]+', String), # all other characters (r'\\\n', String), # line continuation (r'\\', String), # stray backslash ], 'if0': [ (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'), (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'), (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'), (r'.*?\n', Comment), ], 'class': [ (r'[a-zA-Z_]\w*', Name.Class, '#pop') ], 'namespace': [ (r'[a-zA-Z_][\w.]*', Name.Namespace, '#pop') ], }
ValaLexer
python
pyca__cryptography
src/cryptography/hazmat/decrepit/ciphers/algorithms.py
{ "start": 2618, "end": 2884 }
class ____(BlockCipherAlgorithm): name = "RC2" block_size = 64 key_sizes = frozenset([128]) def __init__(self, key: bytes): self.key = _verify_key_size(self, key) @property def key_size(self) -> int: return len(self.key) * 8
RC2
python
pypa__warehouse
warehouse/utils/crypto.py
{ "start": 674, "end": 796 }
class ____(_TimestampSigner): default_digest_method = hashlib.sha512 default_key_derivation = "hmac"
TimestampSigner
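For illustration, the class above keeps the standard itsdangerous TimestampSigner interface and only hardens the digest and key-derivation defaults; a sketch of typical use (the secret, salt, and max_age values are illustrative):

# Sketch: standard itsdangerous TimestampSigner usage with illustrative values.
from warehouse.utils.crypto import TimestampSigner

signer = TimestampSigner("a-secret-key", salt="email-verification")
token = signer.sign(b"user@example.com")
# Raises itsdangerous.SignatureExpired if the token is older than an hour.
value = signer.unsign(token, max_age=3600)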
python
plotly__plotly.py
plotly/graph_objs/scatterternary/hoverlabel/_font.py
{ "start": 233, "end": 17179 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "scatterternary.hoverlabel" _path_str = "scatterternary.hoverlabel.font" _valid_props = { "color", "colorsrc", "family", "familysrc", "lineposition", "linepositionsrc", "shadow", "shadowsrc", "size", "sizesrc", "style", "stylesrc", "textcase", "textcasesrc", "variant", "variantsrc", "weight", "weightsrc", } @property def color(self): """ The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def colorsrc(self): """ Sets the source reference on Chart Studio Cloud for `color`. The 'colorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["colorsrc"] @colorsrc.setter def colorsrc(self, val): self["colorsrc"] = val @property def family(self): """ HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. The 'family' property is a string and must be specified as: - A non-empty string - A tuple, list, or one-dimensional numpy array of the above Returns ------- str|numpy.ndarray """ return self["family"] @family.setter def family(self, val): self["family"] = val @property def familysrc(self): """ Sets the source reference on Chart Studio Cloud for `family`. The 'familysrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["familysrc"] @familysrc.setter def familysrc(self, val): self["familysrc"] = val @property def lineposition(self): """ Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. The 'lineposition' property is a flaglist and may be specified as a string containing: - Any combination of ['under', 'over', 'through'] joined with '+' characters (e.g. 'under+over') OR exactly one of ['none'] (e.g. 'none') - A list or array of the above Returns ------- Any|numpy.ndarray """ return self["lineposition"] @lineposition.setter def lineposition(self, val): self["lineposition"] = val @property def linepositionsrc(self): """ Sets the source reference on Chart Studio Cloud for `lineposition`. The 'linepositionsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["linepositionsrc"] @linepositionsrc.setter def linepositionsrc(self, val): self["linepositionsrc"] = val @property def shadow(self): """ Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow for additional options. 
The 'shadow' property is a string and must be specified as: - A string - A number that will be converted to a string - A tuple, list, or one-dimensional numpy array of the above Returns ------- str|numpy.ndarray """ return self["shadow"] @shadow.setter def shadow(self, val): self["shadow"] = val @property def shadowsrc(self): """ Sets the source reference on Chart Studio Cloud for `shadow`. The 'shadowsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["shadowsrc"] @shadowsrc.setter def shadowsrc(self, val): self["shadowsrc"] = val @property def size(self): """ The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|float|numpy.ndarray """ return self["size"] @size.setter def size(self, val): self["size"] = val @property def sizesrc(self): """ Sets the source reference on Chart Studio Cloud for `size`. The 'sizesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["sizesrc"] @sizesrc.setter def sizesrc(self, val): self["sizesrc"] = val @property def style(self): """ Sets whether a font should be styled with a normal or italic face from its family. The 'style' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'italic'] - A tuple, list, or one-dimensional numpy array of the above Returns ------- Any|numpy.ndarray """ return self["style"] @style.setter def style(self, val): self["style"] = val @property def stylesrc(self): """ Sets the source reference on Chart Studio Cloud for `style`. The 'stylesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["stylesrc"] @stylesrc.setter def stylesrc(self, val): self["stylesrc"] = val @property def textcase(self): """ Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. The 'textcase' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'word caps', 'upper', 'lower'] - A tuple, list, or one-dimensional numpy array of the above Returns ------- Any|numpy.ndarray """ return self["textcase"] @textcase.setter def textcase(self, val): self["textcase"] = val @property def textcasesrc(self): """ Sets the source reference on Chart Studio Cloud for `textcase`. The 'textcasesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["textcasesrc"] @textcasesrc.setter def textcasesrc(self, val): self["textcasesrc"] = val @property def variant(self): """ Sets the variant of the font. The 'variant' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'small-caps', 'all-small-caps', 'all-petite-caps', 'petite-caps', 'unicase'] - A tuple, list, or one-dimensional numpy array of the above Returns ------- Any|numpy.ndarray """ return self["variant"] @variant.setter def variant(self, val): self["variant"] = val @property def variantsrc(self): """ Sets the source reference on Chart Studio Cloud for `variant`. 
The 'variantsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["variantsrc"] @variantsrc.setter def variantsrc(self, val): self["variantsrc"] = val @property def weight(self): """ Sets the weight (or boldness) of the font. The 'weight' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [1, 1000] OR exactly one of ['normal', 'bold'] (e.g. 'bold') - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|numpy.ndarray """ return self["weight"] @weight.setter def weight(self, val): self["weight"] = val @property def weightsrc(self): """ Sets the source reference on Chart Studio Cloud for `weight`. The 'weightsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["weightsrc"] @weightsrc.setter def weightsrc(self, val): self["weightsrc"] = val @property def _prop_descriptions(self): return """\ color colorsrc Sets the source reference on Chart Studio Cloud for `color`. family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. familysrc Sets the source reference on Chart Studio Cloud for `family`. lineposition Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. linepositionsrc Sets the source reference on Chart Studio Cloud for `lineposition`. shadow Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en- US/docs/Web/CSS/text-shadow for additional options. shadowsrc Sets the source reference on Chart Studio Cloud for `shadow`. size sizesrc Sets the source reference on Chart Studio Cloud for `size`. style Sets whether a font should be styled with a normal or italic face from its family. stylesrc Sets the source reference on Chart Studio Cloud for `style`. textcase Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. textcasesrc Sets the source reference on Chart Studio Cloud for `textcase`. variant Sets the variant of the font. variantsrc Sets the source reference on Chart Studio Cloud for `variant`. weight Sets the weight (or boldness) of the font. weightsrc Sets the source reference on Chart Studio Cloud for `weight`. """ def __init__( self, arg=None, color=None, colorsrc=None, family=None, familysrc=None, lineposition=None, linepositionsrc=None, shadow=None, shadowsrc=None, size=None, sizesrc=None, style=None, stylesrc=None, textcase=None, textcasesrc=None, variant=None, variantsrc=None, weight=None, weightsrc=None, **kwargs, ): """ Construct a new Font object Sets the font used in hover labels. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.scatterternary .hoverlabel.Font` color colorsrc Sets the source reference on Chart Studio Cloud for `color`. family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. 
Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. familysrc Sets the source reference on Chart Studio Cloud for `family`. lineposition Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. linepositionsrc Sets the source reference on Chart Studio Cloud for `lineposition`. shadow Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en- US/docs/Web/CSS/text-shadow for additional options. shadowsrc Sets the source reference on Chart Studio Cloud for `shadow`. size sizesrc Sets the source reference on Chart Studio Cloud for `size`. style Sets whether a font should be styled with a normal or italic face from its family. stylesrc Sets the source reference on Chart Studio Cloud for `style`. textcase Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. textcasesrc Sets the source reference on Chart Studio Cloud for `textcase`. variant Sets the variant of the font. variantsrc Sets the source reference on Chart Studio Cloud for `variant`. weight Sets the weight (or boldness) of the font. weightsrc Sets the source reference on Chart Studio Cloud for `weight`. Returns ------- Font """ super().__init__("font") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.scatterternary.hoverlabel.Font constructor must be a dict or an instance of :class:`plotly.graph_objs.scatterternary.hoverlabel.Font`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("color", arg, color) self._set_property("colorsrc", arg, colorsrc) self._set_property("family", arg, family) self._set_property("familysrc", arg, familysrc) self._set_property("lineposition", arg, lineposition) self._set_property("linepositionsrc", arg, linepositionsrc) self._set_property("shadow", arg, shadow) self._set_property("shadowsrc", arg, shadowsrc) self._set_property("size", arg, size) self._set_property("sizesrc", arg, sizesrc) self._set_property("style", arg, style) self._set_property("stylesrc", arg, stylesrc) self._set_property("textcase", arg, textcase) self._set_property("textcasesrc", arg, textcasesrc) self._set_property("variant", arg, variant) self._set_property("variantsrc", arg, variantsrc) self._set_property("weight", arg, weight) self._set_property("weightsrc", arg, weightsrc) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Font
python
dagster-io__dagster
examples/docs_snippets/docs_snippets/guides/operate/configuration/run_config/op_example/resources.py
{ "start": 23, "end": 202 }
class ____(dg.Config): person_name: str @dg.definitions def resources() -> dg.Definitions: return dg.Definitions(resources={"config": MyOpConfig(person_name="")})
MyOpConfig
python
FactoryBoy__factory_boy
factory/base.py
{ "start": 22748, "end": 23223 }
class ____(Factory): """Factory for dictionary-like classes.""" class Meta: abstract = True @classmethod def _build(cls, model_class, *args, **kwargs): if args: raise ValueError( "DictFactory %r does not support Meta.inline_args." % cls) return model_class(**kwargs) @classmethod def _create(cls, model_class, *args, **kwargs): return cls._build(model_class, *args, **kwargs)
BaseDictFactory
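factory_boy's public DictFactory builds on the abstract base above; declared attributes become dict keys, and call-time kwargs override them. A small sketch:

import factory

class AddressFactory(factory.DictFactory):
    city = "Paris"
    country = "France"

addr = AddressFactory(city="Lyon")
assert addr == {"city": "Lyon", "country": "France"}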
python
HypothesisWorks__hypothesis
hypothesis-python/tests/typing_extensions/test_backported_types.py
{ "start": 7770, "end": 9198 }
class ____(TypedDict): a: ReadOnly[Required[int]] b: NotRequired[Annotated[ReadOnly[int], "metadata"]] c: Annotated[ReadOnly[NotRequired[str]], "metadata"] @pytest.mark.parametrize( "check,condition", [ pytest.param( assert_all_examples, lambda novel: "a" in novel, id="a-is-required", ), pytest.param(find_any, lambda novel: "b" in novel, id="b-may-be-present"), pytest.param(find_any, lambda novel: "b" not in novel, id="b-may-be-absent"), pytest.param(find_any, lambda novel: "c" in novel, id="c-may-be-present"), pytest.param(find_any, lambda novel: "c" not in novel, id="c-may-be-absent"), ], ) def test_required_and_not_required_keys_deeply_nested(check, condition): check(from_type(DeeplyNestedQualifiers), condition) def test_typeddict_error_msg(): with pytest.raises(TypeError, match="is not valid as type argument"): class Foo(TypedDict): attr: Required with pytest.raises(TypeError, match="is not valid as type argument"): class Bar(TypedDict): attr: NotRequired with pytest.raises(TypeError, match="is not valid as type argument"): class Baz(TypedDict): attr: ReadOnly def test_literal_string_is_just_a_string(): assert_all_examples(from_type(LiteralString), lambda thing: isinstance(thing, str))
DeeplyNestedQualifiers
python
dask__dask
dask/tests/test_core.py
{ "start": 5030, "end": 7771 }
class ____: hit_eq = 0 def __eq__(self, other): self.hit_eq += 1 return False def test_subs_no_key_data_eq(): # Numpy throws a deprecation warning on bool(array == scalar), which # pollutes the terminal. This test checks that `subs` never tries to # compare keys (scalars) with values (which could be arrays). a = MutateOnEq() subs(a, "x", 1) assert a.hit_eq == 0 subs((add, a, "x"), "x", 1) assert a.hit_eq == 0 def test_subs_with_unfriendly_eq(): try: import numpy as np except ImportError: return else: task = (np.sum, np.array([1, 2])) assert (subs(task, (4, 5), 1) == task) is True class MyException(Exception): pass class F: def __eq__(self, other): raise MyException() task = F() assert subs(task, 1, 2) is task def test_subs_with_surprisingly_friendly_eq(): try: import pandas as pd except ImportError: return else: df = pd.DataFrame() assert subs(df, "x", 1) is df def test_subs_arbitrary_key(): key = (1.2, "foo", (3,)) assert subs((id, key), key, 1) == (id, 1) def test_quote(): literals = [[1, 2, 3], (add, 1, 2), [1, [2, 3]], (add, 1, (add, 2, 3)), {"x": "x"}] for l in literals: assert get({"x": quote(l)}, "x") == l def test_literal_serializable(): l = literal((add, 1, 2)) assert pickle.loads(pickle.dumps(l)).data == (add, 1, 2) def test_getcycle(): dsk = { 0: [7, 13, 7, 9, 13, 3, 9, 18, 18, 17], 1: [14, 14, 12, 1, 9, 16, 4, 5, 9, 8], 2: [3, 1, 7, 7, 2, 0, 0, 6, 3, 2], 3: [4, 8, 3, 14, 15, 19, 14, 1, 9, 1], 4: [9, 13, 19, 4, 16, 8, 11, 1, 16, 1], 5: [9, 8, 12, 13, 10, 13, 19, 3, 18, 18], 6: [10, 2, 13, 16, 3, 12, 7, 16, 5, 17], 7: [16, 8, 6, 4, 4, 10, 3, 1, 13, 10], 8: [11, 4, 12, 10, 14, 6, 18, 15, 16, 12], 9: [17, 18, 5, 16, 19, 16, 3, 6, 16, 18], } assert len(getcycle(dsk, list(dsk))) <= 3 # 7->6->7 dsk = { 0: [1, 27, 6, 25, 30, 16, 1, 1, 35, 17], 1: [35, 22, 21, 31, 2, 28, 36, 0, 29, 29], 2: [14, 27, 10, 1, 38, 18, 28, 28, 6, 0], 3: [0, 37, 7, 16, 38, 28, 34, 13, 30, 10], 4: [22, 22, 27, 13, 29, 36, 22, 9, 39, 19], 5: [38, 7, 18, 17, 33, 5, 29, 11, 23, 30], 6: [3, 30, 28, 38, 4, 13, 14, 27, 29, 38], 7: [22, 27, 12, 2, 22, 16, 34, 15, 18, 16], 8: [36, 21, 24, 22, 28, 38, 21, 2, 4, 24], 9: [38, 32, 38, 7, 31, 34, 39, 20, 30, 18], } assert len(getcycle(dsk, list(dsk))) <= 4 # 0->1->2->0
MutateOnEq
python
pypa__twine
twine/package.py
{ "start": 3078, "end": 4238 }
class ____(TypedDict, total=False): # Metadata 1.0 - PEP 241 metadata_version: str name: str version: str platform: List[str] summary: str description: str keywords: List[str] home_page: str author: str author_email: str license: str # Metadata 1.1 - PEP 314 supported_platform: List[str] download_url: str classifiers: List[str] requires: List[str] provides: List[str] obsoletes: List[str] # Metadata 1.2 - PEP 345 maintainer: str maintainer_email: str requires_dist: List[str] provides_dist: List[str] obsoletes_dist: List[str] requires_python: str requires_external: List[str] project_urls: Dict[str, str] # Metadata 2.1 - PEP 566 description_content_type: str provides_extra: List[str] # Metadata 2.2 - PEP 643 dynamic: List[str] # Metadata 2.4 - PEP 639 license_expression: str license_file: List[str] # Additional metadata comment: str pyversion: str filetype: str gpg_signature: Tuple[str, bytes] attestations: str sha256_digest: str blake2_256_digest: str
PackageMetadata
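Since this TypedDict is declared with total=False, every key is optional and a conforming value is an ordinary dict; a minimal sketch (the field values are illustrative):

from twine.package import PackageMetadata

meta: PackageMetadata = {
    "metadata_version": "2.1",
    "name": "example-pkg",  # illustrative project name
    "version": "1.0.0",
    "filetype": "sdist",
    "pyversion": "source",
}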
python
pandas-dev__pandas
pandas/tests/tseries/offsets/test_quarter.py
{ "start": 958, "end": 4889 }
class ____: def test_repr(self): expected = "<QuarterBegin: startingMonth=3>" assert repr(QuarterBegin()) == expected expected = "<QuarterBegin: startingMonth=3>" assert repr(QuarterBegin(startingMonth=3)) == expected expected = "<QuarterBegin: startingMonth=1>" assert repr(QuarterBegin(startingMonth=1)) == expected def test_offset_corner_case(self): # corner offset = QuarterBegin(n=-1, startingMonth=1) assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 1) offset_cases = [] offset_cases.append( ( QuarterBegin(startingMonth=1), { datetime(2007, 12, 1): datetime(2008, 1, 1), datetime(2008, 1, 1): datetime(2008, 4, 1), datetime(2008, 2, 15): datetime(2008, 4, 1), datetime(2008, 2, 29): datetime(2008, 4, 1), datetime(2008, 3, 15): datetime(2008, 4, 1), datetime(2008, 3, 31): datetime(2008, 4, 1), datetime(2008, 4, 15): datetime(2008, 7, 1), datetime(2008, 4, 1): datetime(2008, 7, 1), }, ) ) offset_cases.append( ( QuarterBegin(startingMonth=2), { datetime(2008, 1, 1): datetime(2008, 2, 1), datetime(2008, 1, 31): datetime(2008, 2, 1), datetime(2008, 1, 15): datetime(2008, 2, 1), datetime(2008, 2, 29): datetime(2008, 5, 1), datetime(2008, 3, 15): datetime(2008, 5, 1), datetime(2008, 3, 31): datetime(2008, 5, 1), datetime(2008, 4, 15): datetime(2008, 5, 1), datetime(2008, 4, 30): datetime(2008, 5, 1), }, ) ) offset_cases.append( ( QuarterBegin(startingMonth=1, n=0), { datetime(2008, 1, 1): datetime(2008, 1, 1), datetime(2008, 12, 1): datetime(2009, 1, 1), datetime(2008, 1, 1): datetime(2008, 1, 1), datetime(2008, 2, 15): datetime(2008, 4, 1), datetime(2008, 2, 29): datetime(2008, 4, 1), datetime(2008, 3, 15): datetime(2008, 4, 1), datetime(2008, 3, 31): datetime(2008, 4, 1), datetime(2008, 4, 15): datetime(2008, 7, 1), datetime(2008, 4, 30): datetime(2008, 7, 1), }, ) ) offset_cases.append( ( QuarterBegin(startingMonth=1, n=-1), { datetime(2008, 1, 1): datetime(2007, 10, 1), datetime(2008, 1, 31): datetime(2008, 1, 1), datetime(2008, 2, 15): datetime(2008, 1, 1), datetime(2008, 2, 29): datetime(2008, 1, 1), datetime(2008, 3, 15): datetime(2008, 1, 1), datetime(2008, 3, 31): datetime(2008, 1, 1), datetime(2008, 4, 15): datetime(2008, 4, 1), datetime(2008, 4, 30): datetime(2008, 4, 1), datetime(2008, 7, 1): datetime(2008, 4, 1), }, ) ) offset_cases.append( ( QuarterBegin(startingMonth=1, n=2), { datetime(2008, 1, 1): datetime(2008, 7, 1), datetime(2008, 2, 15): datetime(2008, 7, 1), datetime(2008, 2, 29): datetime(2008, 7, 1), datetime(2008, 3, 15): datetime(2008, 7, 1), datetime(2008, 3, 31): datetime(2008, 7, 1), datetime(2008, 4, 15): datetime(2008, 10, 1), datetime(2008, 4, 1): datetime(2008, 10, 1), }, ) ) @pytest.mark.parametrize("case", offset_cases) def test_offset(self, case): offset, cases = case for base, expected in cases.items(): assert_offset_equal(offset, base, expected)
TestQuarterBegin
python
streamlit__streamlit
lib/streamlit/runtime/state/session_state_proxy.py
{ "start": 2623, "end": 5585 }
class ____(MutableMapping[Key, Any]): """A stateless singleton that proxies `st.session_state` interactions to the current script thread's SessionState instance. The proxy API differs slightly from SessionState: it does not allow callers to get, set, or iterate over "keyless" widgets (that is, widgets that were created without a user_key, and have autogenerated keys). """ def __iter__(self) -> Iterator[Any]: """Iterator over user state and keyed widget values.""" # TODO: this is unsafe if fastReruns is true! Let's deprecate/remove. return iter(get_session_state().filtered_state) def __len__(self) -> int: """Number of user state and keyed widget values in session_state.""" return len(get_session_state().filtered_state) def __str__(self) -> str: """String representation of user state and keyed widget values.""" return str(get_session_state().filtered_state) def __getitem__(self, key: Key) -> Any: """Return the state or widget value with the given key. Raises ------ StreamlitAPIException If the key is not a valid SessionState user key. """ key = str(key) require_valid_user_key(key) return get_session_state()[key] @gather_metrics("session_state.set_item") def __setitem__(self, key: Key, value: Any) -> None: """Set the value of the given key. Raises ------ StreamlitAPIException If the key is not a valid SessionState user key. """ key = str(key) require_valid_user_key(key) get_session_state()[key] = value def __delitem__(self, key: Key) -> None: """Delete the value with the given key. Raises ------ StreamlitAPIException If the key is not a valid SessionState user key. """ key = str(key) require_valid_user_key(key) del get_session_state()[key] def __getattr__(self, key: str) -> Any: try: return self[key] except KeyError: raise AttributeError(_missing_attr_error_message(key)) @gather_metrics("session_state.set_attr") def __setattr__(self, key: str, value: Any) -> None: self[key] = value def __delattr__(self, key: str) -> None: try: del self[key] except KeyError: raise AttributeError(_missing_attr_error_message(key)) def to_dict(self) -> dict[str, Any]: """Return a dict containing all session_state and keyed widget values.""" return get_session_state().filtered_state def _missing_attr_error_message(attr_name: str) -> str: return ( f'st.session_state has no attribute "{attr_name}". Did you forget to initialize it? ' f"More info: https://docs.streamlit.io/develop/concepts/architecture/session-state#initialization" )
SessionStateProxy
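A minimal sketch of the proxy behavior described above: key access and attribute access are interchangeable for user-defined keys, and to_dict() exposes the filtered state.

import streamlit as st

if "count" not in st.session_state:
    st.session_state["count"] = 0    # __setitem__ path

st.session_state.count += 1          # __getattr__ / __setattr__ path
st.write(st.session_state.to_dict()) # user state plus keyed widget values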
python
anthropics__anthropic-sdk-python
src/anthropic/types/beta/beta_clear_thinking_20251015_edit_param.py
{ "start": 502, "end": 779 }
class ____(TypedDict, total=False): type: Required[Literal["clear_thinking_20251015"]] keep: Keep """Number of most recent assistant turns to keep thinking blocks for. Older turns will have their thinking blocks removed. """
BetaClearThinking20251015EditParam
python
PyCQA__pylint
tests/functional/n/non/non_iterator_returned.py
{ "start": 2097, "end": 2243 }
class ____: """__iter__ returns a class.""" def __iter__(self): # [non-iterator-returned] return ThirdBadIterator
FourthBadIterator
python
django__django
tests/test_client_regress/tests.py
{ "start": 29899, "end": 31218 }
class ____(SimpleTestCase): def test_simple_argument_get(self): "Get a view that has a simple string argument" response = self.client.get(reverse("arg_view", args=["Slartibartfast"])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b"Howdy, Slartibartfast") def test_argument_with_space_get(self): "Get a view that has a string argument that requires escaping" response = self.client.get(reverse("arg_view", args=["Arthur Dent"])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b"Hi, Arthur") def test_simple_argument_post(self): "Post for a view that has a simple string argument" response = self.client.post(reverse("arg_view", args=["Slartibartfast"])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b"Howdy, Slartibartfast") def test_argument_with_space_post(self): "Post for a view that has a string argument that requires escaping" response = self.client.post(reverse("arg_view", args=["Arthur Dent"])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b"Hi, Arthur") @override_settings(ROOT_URLCONF="test_client_regress.urls")
URLEscapingTests
python
wandb__wandb
wandb/vendor/pygments/lexers/lisp.py
{ "start": 16554, "end": 78343 }
class ____(RegexLexer): """ Lexer for `Racket <http://racket-lang.org/>`_ source code (formerly known as PLT Scheme). .. versionadded:: 1.6 """ name = 'Racket' aliases = ['racket', 'rkt'] filenames = ['*.rkt', '*.rktd', '*.rktl'] mimetypes = ['text/x-racket', 'application/x-racket'] # Generated by example.rkt _keywords = ( u'#%app', u'#%datum', u'#%declare', u'#%expression', u'#%module-begin', u'#%plain-app', u'#%plain-lambda', u'#%plain-module-begin', u'#%printing-module-begin', u'#%provide', u'#%require', u'#%stratified-body', u'#%top', u'#%top-interaction', u'#%variable-reference', u'->', u'->*', u'->*m', u'->d', u'->dm', u'->i', u'->m', u'...', u':do-in', u'==', u'=>', u'_', u'absent', u'abstract', u'all-defined-out', u'all-from-out', u'and', u'any', u'augment', u'augment*', u'augment-final', u'augment-final*', u'augride', u'augride*', u'begin', u'begin-for-syntax', u'begin0', u'case', u'case->', u'case->m', u'case-lambda', u'class', u'class*', u'class-field-accessor', u'class-field-mutator', u'class/c', u'class/derived', u'combine-in', u'combine-out', u'command-line', u'compound-unit', u'compound-unit/infer', u'cond', u'cons/dc', u'contract', u'contract-out', u'contract-struct', u'contracted', u'define', u'define-compound-unit', u'define-compound-unit/infer', u'define-contract-struct', u'define-custom-hash-types', u'define-custom-set-types', u'define-for-syntax', u'define-local-member-name', u'define-logger', u'define-match-expander', u'define-member-name', u'define-module-boundary-contract', u'define-namespace-anchor', u'define-opt/c', u'define-sequence-syntax', u'define-serializable-class', u'define-serializable-class*', u'define-signature', u'define-signature-form', u'define-struct', u'define-struct/contract', u'define-struct/derived', u'define-syntax', u'define-syntax-rule', u'define-syntaxes', u'define-unit', u'define-unit-binding', u'define-unit-from-context', u'define-unit/contract', u'define-unit/new-import-export', u'define-unit/s', u'define-values', u'define-values-for-export', u'define-values-for-syntax', u'define-values/invoke-unit', u'define-values/invoke-unit/infer', u'define/augment', u'define/augment-final', u'define/augride', u'define/contract', u'define/final-prop', u'define/match', u'define/overment', u'define/override', u'define/override-final', u'define/private', u'define/public', u'define/public-final', u'define/pubment', u'define/subexpression-pos-prop', u'define/subexpression-pos-prop/name', u'delay', u'delay/idle', u'delay/name', u'delay/strict', u'delay/sync', u'delay/thread', u'do', u'else', u'except', u'except-in', u'except-out', u'export', u'extends', u'failure-cont', u'false', u'false/c', u'field', u'field-bound?', u'file', u'flat-murec-contract', u'flat-rec-contract', u'for', u'for*', u'for*/and', u'for*/async', u'for*/first', u'for*/fold', u'for*/fold/derived', u'for*/hash', u'for*/hasheq', u'for*/hasheqv', u'for*/last', u'for*/list', u'for*/lists', u'for*/mutable-set', u'for*/mutable-seteq', u'for*/mutable-seteqv', u'for*/or', u'for*/product', u'for*/set', u'for*/seteq', u'for*/seteqv', u'for*/stream', u'for*/sum', u'for*/vector', u'for*/weak-set', u'for*/weak-seteq', u'for*/weak-seteqv', u'for-label', u'for-meta', u'for-syntax', u'for-template', u'for/and', u'for/async', u'for/first', u'for/fold', u'for/fold/derived', u'for/hash', u'for/hasheq', u'for/hasheqv', u'for/last', u'for/list', u'for/lists', u'for/mutable-set', u'for/mutable-seteq', u'for/mutable-seteqv', u'for/or', u'for/product', u'for/set', u'for/seteq', u'for/seteqv', u'for/stream', 
        u'for/sum', u'for/vector', u'for/weak-set', u'for/weak-seteq', u'for/weak-seteqv', u'gen:custom-write', u'gen:dict', u'gen:equal+hash', u'gen:set', u'gen:stream', u'generic', u'get-field', u'hash/dc', u'if', u'implies', u'import', u'include', u'include-at/relative-to', u'include-at/relative-to/reader', u'include/reader', u'inherit', u'inherit-field', u'inherit/inner', u'inherit/super', u'init', u'init-depend', u'init-field', u'init-rest', u'inner', u'inspect', u'instantiate', u'interface', u'interface*', u'invariant-assertion', u'invoke-unit', u'invoke-unit/infer', u'lambda', u'lazy', u'let', u'let*', u'let*-values', u'let-syntax', u'let-syntaxes', u'let-values', u'let/cc', u'let/ec', u'letrec', u'letrec-syntax', u'letrec-syntaxes', u'letrec-syntaxes+values', u'letrec-values', u'lib', u'link', u'local', u'local-require', u'log-debug', u'log-error', u'log-fatal', u'log-info', u'log-warning',
        u'match', u'match*', u'match*/derived', u'match-define', u'match-define-values', u'match-lambda', u'match-lambda*', u'match-lambda**', u'match-let', u'match-let*', u'match-let*-values', u'match-let-values', u'match-letrec', u'match-letrec-values', u'match/derived', u'match/values', u'member-name-key', u'mixin', u'module', u'module*', u'module+', u'nand', u'new', u'nor', u'object-contract', u'object/c', u'only', u'only-in', u'only-meta-in', u'open', u'opt/c', u'or', u'overment', u'overment*', u'override', u'override*', u'override-final', u'override-final*', u'parameterize', u'parameterize*', u'parameterize-break', u'parametric->/c', u'place', u'place*', u'place/context', u'planet', u'prefix', u'prefix-in', u'prefix-out', u'private', u'private*', u'prompt-tag/c', u'protect-out', u'provide', u'provide-signature-elements', u'provide/contract', u'public', u'public*', u'public-final', u'public-final*', u'pubment', u'pubment*',
        u'quasiquote', u'quasisyntax', u'quasisyntax/loc', u'quote', u'quote-syntax', u'quote-syntax/prune', u'recontract-out', u'recursive-contract', u'relative-in', u'rename', u'rename-in', u'rename-inner', u'rename-out', u'rename-super', u'require', u'send', u'send*', u'send+', u'send-generic', u'send/apply', u'send/keyword-apply', u'set!', u'set!-values', u'set-field!', u'shared', u'stream', u'stream*', u'stream-cons', u'struct', u'struct*', u'struct-copy', u'struct-field-index', u'struct-out', u'struct/c', u'struct/ctc', u'struct/dc', u'submod', u'super', u'super-instantiate', u'super-make-object', u'super-new', u'syntax', u'syntax-case', u'syntax-case*', u'syntax-id-rules', u'syntax-rules', u'syntax/loc', u'tag', u'this', u'this%', u'thunk', u'thunk*', u'time', u'unconstrained-domain->', u'unit', u'unit-from-context', u'unit/c', u'unit/new-import-export', u'unit/s', u'unless', u'unquote', u'unquote-splicing', u'unsyntax', u'unsyntax-splicing', u'values/drop', u'when', u'with-continuation-mark', u'with-contract', u'with-contract-continuation-mark', u'with-handlers', u'with-handlers*', u'with-method', u'with-syntax', u'λ'
    )

    # Generated by example.rkt
    _builtins = (
        u'*', u'*list/c', u'+', u'-', u'/', u'<', u'</c', u'<=', u'<=/c', u'=', u'=/c', u'>', u'>/c', u'>=', u'>=/c', u'abort-current-continuation', u'abs', u'absolute-path?', u'acos', u'add-between', u'add1', u'alarm-evt', u'always-evt', u'and/c', u'andmap', u'angle', u'any/c', u'append', u'append*', u'append-map', u'apply', u'argmax', u'argmin', u'arithmetic-shift', u'arity-at-least', u'arity-at-least-value', u'arity-at-least?', u'arity-checking-wrapper', u'arity-includes?', u'arity=?', u'arrow-contract-info',
u'arrow-contract-info-accepts-arglist', u'arrow-contract-info-chaperone-procedure', u'arrow-contract-info-check-first-order', u'arrow-contract-info?', u'asin', u'assf', u'assoc', u'assq', u'assv', u'atan', u'bad-number-of-results', u'banner', u'base->-doms/c', u'base->-rngs/c', u'base->?', u'between/c', u'bitwise-and', u'bitwise-bit-field', u'bitwise-bit-set?', u'bitwise-ior', u'bitwise-not', u'bitwise-xor', u'blame-add-car-context', u'blame-add-cdr-context', u'blame-add-context', u'blame-add-missing-party', u'blame-add-nth-arg-context', u'blame-add-range-context', u'blame-add-unknown-context', u'blame-context', u'blame-contract', u'blame-fmt->-string', u'blame-missing-party?', u'blame-negative', u'blame-original?', u'blame-positive', u'blame-replace-negative', u'blame-source', u'blame-swap', u'blame-swapped?', u'blame-update', u'blame-value', u'blame?', u'boolean=?', u'boolean?', u'bound-identifier=?', u'box', u'box-cas!', u'box-immutable', u'box-immutable/c', u'box/c', u'box?', u'break-enabled', u'break-parameterization?', u'break-thread', u'build-chaperone-contract-property', u'build-compound-type-name', u'build-contract-property', u'build-flat-contract-property', u'build-list', u'build-path', u'build-path/convention-type', u'build-string', u'build-vector', u'byte-pregexp', u'byte-pregexp?', u'byte-ready?', u'byte-regexp', u'byte-regexp?', u'byte?', u'bytes', u'bytes->immutable-bytes', u'bytes->list', u'bytes->path', u'bytes->path-element', u'bytes->string/latin-1', u'bytes->string/locale', u'bytes->string/utf-8', u'bytes-append', u'bytes-append*', u'bytes-close-converter', u'bytes-convert', u'bytes-convert-end', u'bytes-converter?', u'bytes-copy', u'bytes-copy!', u'bytes-environment-variable-name?', u'bytes-fill!', u'bytes-join', u'bytes-length', u'bytes-no-nuls?', u'bytes-open-converter', u'bytes-ref', u'bytes-set!', u'bytes-utf-8-index', u'bytes-utf-8-length', u'bytes-utf-8-ref', u'bytes<?', u'bytes=?', u'bytes>?', u'bytes?', u'caaaar', u'caaadr', u'caaar', u'caadar', u'caaddr', u'caadr', u'caar', u'cadaar', u'cadadr', u'cadar', u'caddar', u'cadddr', u'caddr', u'cadr', u'call-in-nested-thread', u'call-with-atomic-output-file', u'call-with-break-parameterization', u'call-with-composable-continuation', u'call-with-continuation-barrier', u'call-with-continuation-prompt', u'call-with-current-continuation', u'call-with-default-reading-parameterization', u'call-with-escape-continuation', u'call-with-exception-handler', u'call-with-file-lock/timeout', u'call-with-immediate-continuation-mark', u'call-with-input-bytes', u'call-with-input-file', u'call-with-input-file*', u'call-with-input-string', u'call-with-output-bytes', u'call-with-output-file', u'call-with-output-file*', u'call-with-output-string', u'call-with-parameterization', u'call-with-semaphore', u'call-with-semaphore/enable-break', u'call-with-values', u'call/cc', u'call/ec', u'car', u'cartesian-product', u'cdaaar', u'cdaadr', u'cdaar', u'cdadar', u'cdaddr', u'cdadr', u'cdar', u'cddaar', u'cddadr', u'cddar', u'cdddar', u'cddddr', u'cdddr', u'cddr', u'cdr', u'ceiling', u'channel-get', u'channel-put', u'channel-put-evt', u'channel-put-evt?', u'channel-try-get', u'channel/c', u'channel?', u'chaperone-box', u'chaperone-channel', u'chaperone-continuation-mark-key', u'chaperone-contract-property?', u'chaperone-contract?', u'chaperone-evt', u'chaperone-hash', u'chaperone-hash-set', u'chaperone-of?', u'chaperone-procedure', u'chaperone-procedure*', u'chaperone-prompt-tag', u'chaperone-struct', u'chaperone-struct-type', u'chaperone-vector', 
u'chaperone?', u'char->integer', u'char-alphabetic?', u'char-blank?', u'char-ci<=?', u'char-ci<?', u'char-ci=?', u'char-ci>=?', u'char-ci>?', u'char-downcase', u'char-foldcase', u'char-general-category', u'char-graphic?', u'char-in', u'char-in/c', u'char-iso-control?', u'char-lower-case?', u'char-numeric?', u'char-punctuation?', u'char-ready?', u'char-symbolic?', u'char-title-case?', u'char-titlecase', u'char-upcase', u'char-upper-case?', u'char-utf-8-length', u'char-whitespace?', u'char<=?', u'char<?', u'char=?', u'char>=?', u'char>?', u'char?', u'check-duplicate-identifier', u'check-duplicates', u'checked-procedure-check-and-extract', u'choice-evt', u'class->interface', u'class-info', u'class-seal', u'class-unseal', u'class?', u'cleanse-path', u'close-input-port', u'close-output-port', u'coerce-chaperone-contract', u'coerce-chaperone-contracts', u'coerce-contract', u'coerce-contract/f', u'coerce-contracts', u'coerce-flat-contract', u'coerce-flat-contracts', u'collect-garbage', u'collection-file-path', u'collection-path', u'combinations', u'compile', u'compile-allow-set!-undefined', u'compile-context-preservation-enabled', u'compile-enforce-module-constants', u'compile-syntax', u'compiled-expression-recompile', u'compiled-expression?', u'compiled-module-expression?', u'complete-path?', u'complex?', u'compose', u'compose1', u'conjoin', u'conjugate', u'cons', u'cons/c', u'cons?', u'const', u'continuation-mark-key/c', u'continuation-mark-key?', u'continuation-mark-set->context', u'continuation-mark-set->list', u'continuation-mark-set->list*', u'continuation-mark-set-first', u'continuation-mark-set?', u'continuation-marks', u'continuation-prompt-available?', u'continuation-prompt-tag?', u'continuation?', u'contract-continuation-mark-key', u'contract-custom-write-property-proc', u'contract-exercise', u'contract-first-order', u'contract-first-order-passes?', u'contract-late-neg-projection', u'contract-name', u'contract-proc', u'contract-projection', u'contract-property?', u'contract-random-generate', u'contract-random-generate-fail', u'contract-random-generate-fail?', u'contract-random-generate-get-current-environment', u'contract-random-generate-stash', u'contract-random-generate/choose', u'contract-stronger?', u'contract-struct-exercise', u'contract-struct-generate', u'contract-struct-late-neg-projection', u'contract-struct-list-contract?', u'contract-val-first-projection', u'contract?', u'convert-stream', u'copy-directory/files', u'copy-file', u'copy-port', u'cos', u'cosh', u'count', u'current-blame-format', u'current-break-parameterization', u'current-code-inspector', u'current-command-line-arguments', u'current-compile', u'current-compiled-file-roots', u'current-continuation-marks', u'current-contract-region', u'current-custodian', u'current-directory', u'current-directory-for-user', u'current-drive', u'current-environment-variables', u'current-error-port', u'current-eval', u'current-evt-pseudo-random-generator', u'current-force-delete-permissions', u'current-future', u'current-gc-milliseconds', u'current-get-interaction-input-port', u'current-inexact-milliseconds', u'current-input-port', u'current-inspector', u'current-library-collection-links', u'current-library-collection-paths', u'current-load', u'current-load-extension', u'current-load-relative-directory', u'current-load/use-compiled', u'current-locale', u'current-logger', u'current-memory-use', u'current-milliseconds', u'current-module-declare-name', u'current-module-declare-source', u'current-module-name-resolver', 
u'current-module-path-for-load', u'current-namespace', u'current-output-port', u'current-parameterization', u'current-plumber', u'current-preserved-thread-cell-values', u'current-print', u'current-process-milliseconds', u'current-prompt-read', u'current-pseudo-random-generator', u'current-read-interaction', u'current-reader-guard', u'current-readtable', u'current-seconds', u'current-security-guard', u'current-subprocess-custodian-mode', u'current-thread', u'current-thread-group', u'current-thread-initial-stack-size', u'current-write-relative-directory', u'curry', u'curryr', u'custodian-box-value', u'custodian-box?', u'custodian-limit-memory', u'custodian-managed-list', u'custodian-memory-accounting-available?', u'custodian-require-memory', u'custodian-shutdown-all', u'custodian?', u'custom-print-quotable-accessor', u'custom-print-quotable?', u'custom-write-accessor', u'custom-write-property-proc', u'custom-write?', u'date', u'date*', u'date*-nanosecond', u'date*-time-zone-name', u'date*?', u'date-day', u'date-dst?', u'date-hour', u'date-minute', u'date-month', u'date-second', u'date-time-zone-offset', u'date-week-day', u'date-year', u'date-year-day', u'date?', u'datum->syntax', u'datum-intern-literal', u'default-continuation-prompt-tag', u'degrees->radians', u'delete-directory', u'delete-directory/files', u'delete-file', u'denominator', u'dict->list', u'dict-can-functional-set?', u'dict-can-remove-keys?', u'dict-clear', u'dict-clear!', u'dict-copy', u'dict-count', u'dict-empty?', u'dict-for-each', u'dict-has-key?', u'dict-implements/c', u'dict-implements?', u'dict-iter-contract', u'dict-iterate-first', u'dict-iterate-key', u'dict-iterate-next', u'dict-iterate-value', u'dict-key-contract', u'dict-keys', u'dict-map', u'dict-mutable?', u'dict-ref', u'dict-ref!', u'dict-remove', u'dict-remove!', u'dict-set', u'dict-set!', u'dict-set*', u'dict-set*!', u'dict-update', u'dict-update!', u'dict-value-contract', u'dict-values', u'dict?', u'directory-exists?', u'directory-list', u'disjoin', u'display', u'display-lines', u'display-lines-to-file', u'display-to-file', u'displayln', u'double-flonum?', u'drop', u'drop-common-prefix', u'drop-right', u'dropf', u'dropf-right', u'dump-memory-stats', u'dup-input-port', u'dup-output-port', u'dynamic->*', u'dynamic-get-field', u'dynamic-object/c', u'dynamic-place', u'dynamic-place*', u'dynamic-require', u'dynamic-require-for-syntax', u'dynamic-send', u'dynamic-set-field!', u'dynamic-wind', u'eighth', u'empty', u'empty-sequence', u'empty-stream', u'empty?', u'environment-variables-copy', u'environment-variables-names', u'environment-variables-ref', u'environment-variables-set!', u'environment-variables?', u'eof', u'eof-evt', u'eof-object?', u'ephemeron-value', u'ephemeron?', u'eprintf', u'eq-contract-val', u'eq-contract?', u'eq-hash-code', u'eq?', u'equal-contract-val', u'equal-contract?', u'equal-hash-code', u'equal-secondary-hash-code', u'equal<%>', u'equal?', u'equal?/recur', u'eqv-hash-code', u'eqv?', u'error', u'error-display-handler', u'error-escape-handler', u'error-print-context-length', u'error-print-source-location', u'error-print-width', u'error-value->string-handler', u'eval', u'eval-jit-enabled', u'eval-syntax', u'even?', u'evt/c', u'evt?', u'exact->inexact', u'exact-ceiling', u'exact-floor', u'exact-integer?', u'exact-nonnegative-integer?', u'exact-positive-integer?', u'exact-round', u'exact-truncate', u'exact?', u'executable-yield-handler', u'exit', u'exit-handler', u'exn', u'exn-continuation-marks', u'exn-message', u'exn:break', 
u'exn:break-continuation', u'exn:break:hang-up', u'exn:break:hang-up?', u'exn:break:terminate', u'exn:break:terminate?', u'exn:break?', u'exn:fail', u'exn:fail:contract', u'exn:fail:contract:arity', u'exn:fail:contract:arity?', u'exn:fail:contract:blame', u'exn:fail:contract:blame-object', u'exn:fail:contract:blame?', u'exn:fail:contract:continuation', u'exn:fail:contract:continuation?', u'exn:fail:contract:divide-by-zero', u'exn:fail:contract:divide-by-zero?', u'exn:fail:contract:non-fixnum-result', u'exn:fail:contract:non-fixnum-result?', u'exn:fail:contract:variable', u'exn:fail:contract:variable-id', u'exn:fail:contract:variable?', u'exn:fail:contract?', u'exn:fail:filesystem', u'exn:fail:filesystem:errno', u'exn:fail:filesystem:errno-errno', u'exn:fail:filesystem:errno?', u'exn:fail:filesystem:exists', u'exn:fail:filesystem:exists?', u'exn:fail:filesystem:missing-module', u'exn:fail:filesystem:missing-module-path', u'exn:fail:filesystem:missing-module?', u'exn:fail:filesystem:version', u'exn:fail:filesystem:version?', u'exn:fail:filesystem?', u'exn:fail:network', u'exn:fail:network:errno', u'exn:fail:network:errno-errno', u'exn:fail:network:errno?', u'exn:fail:network?', u'exn:fail:object', u'exn:fail:object?', u'exn:fail:out-of-memory', u'exn:fail:out-of-memory?', u'exn:fail:read', u'exn:fail:read-srclocs', u'exn:fail:read:eof', u'exn:fail:read:eof?', u'exn:fail:read:non-char', u'exn:fail:read:non-char?', u'exn:fail:read?', u'exn:fail:syntax', u'exn:fail:syntax-exprs', u'exn:fail:syntax:missing-module', u'exn:fail:syntax:missing-module-path', u'exn:fail:syntax:missing-module?', u'exn:fail:syntax:unbound', u'exn:fail:syntax:unbound?', u'exn:fail:syntax?', u'exn:fail:unsupported', u'exn:fail:unsupported?', u'exn:fail:user', u'exn:fail:user?', u'exn:fail?', u'exn:misc:match?', u'exn:missing-module-accessor', u'exn:missing-module?', u'exn:srclocs-accessor', u'exn:srclocs?', u'exn?', u'exp', u'expand', u'expand-once', u'expand-syntax', u'expand-syntax-once', u'expand-syntax-to-top-form', u'expand-to-top-form', u'expand-user-path', u'explode-path', u'expt', u'externalizable<%>', u'failure-result/c', u'false?', u'field-names', u'fifth', u'file->bytes', u'file->bytes-lines', u'file->lines', u'file->list', u'file->string', u'file->value', u'file-exists?', u'file-name-from-path', u'file-or-directory-identity', u'file-or-directory-modify-seconds', u'file-or-directory-permissions', u'file-position', u'file-position*', u'file-size', u'file-stream-buffer-mode', u'file-stream-port?', u'file-truncate', u'filename-extension', u'filesystem-change-evt', u'filesystem-change-evt-cancel', u'filesystem-change-evt?', u'filesystem-root-list', u'filter', u'filter-map', u'filter-not', u'filter-read-input-port', u'find-executable-path', u'find-files', u'find-library-collection-links', u'find-library-collection-paths', u'find-relative-path', u'find-system-path', u'findf', u'first', u'first-or/c', u'fixnum?', u'flat-contract', u'flat-contract-predicate', u'flat-contract-property?', u'flat-contract?', u'flat-named-contract', u'flatten', u'floating-point-bytes->real', u'flonum?', u'floor', u'flush-output', u'fold-files', u'foldl', u'foldr', u'for-each', u'force', u'format', u'fourth', u'fprintf', u'free-identifier=?', u'free-label-identifier=?', u'free-template-identifier=?', u'free-transformer-identifier=?', u'fsemaphore-count', u'fsemaphore-post', u'fsemaphore-try-wait?', u'fsemaphore-wait', u'fsemaphore?', u'future', u'future?', u'futures-enabled?', u'gcd', u'generate-member-key', u'generate-temporaries', 
u'generic-set?', u'generic?', u'gensym', u'get-output-bytes', u'get-output-string', u'get-preference', u'get/build-late-neg-projection', u'get/build-val-first-projection', u'getenv', u'global-port-print-handler', u'group-by', u'group-execute-bit', u'group-read-bit', u'group-write-bit', u'guard-evt', u'handle-evt', u'handle-evt?', u'has-blame?', u'has-contract?', u'hash', u'hash->list', u'hash-clear', u'hash-clear!', u'hash-copy', u'hash-copy-clear', u'hash-count', u'hash-empty?', u'hash-eq?', u'hash-equal?', u'hash-eqv?', u'hash-for-each', u'hash-has-key?', u'hash-iterate-first', u'hash-iterate-key', u'hash-iterate-key+value', u'hash-iterate-next', u'hash-iterate-pair', u'hash-iterate-value', u'hash-keys', u'hash-map', u'hash-placeholder?', u'hash-ref', u'hash-ref!', u'hash-remove', u'hash-remove!', u'hash-set', u'hash-set!', u'hash-set*', u'hash-set*!', u'hash-update', u'hash-update!', u'hash-values', u'hash-weak?', u'hash/c', u'hash?', u'hasheq', u'hasheqv', u'identifier-binding', u'identifier-binding-symbol', u'identifier-label-binding', u'identifier-prune-lexical-context', u'identifier-prune-to-source-module', u'identifier-remove-from-definition-context', u'identifier-template-binding', u'identifier-transformer-binding', u'identifier?', u'identity', u'if/c', u'imag-part', u'immutable?', u'impersonate-box', u'impersonate-channel', u'impersonate-continuation-mark-key', u'impersonate-hash', u'impersonate-hash-set', u'impersonate-procedure', u'impersonate-procedure*', u'impersonate-prompt-tag', u'impersonate-struct', u'impersonate-vector', u'impersonator-contract?', u'impersonator-ephemeron', u'impersonator-of?', u'impersonator-prop:application-mark', u'impersonator-prop:blame', u'impersonator-prop:contracted', u'impersonator-property-accessor-procedure?', u'impersonator-property?', u'impersonator?', u'implementation?', u'implementation?/c', u'in-bytes', u'in-bytes-lines', u'in-combinations', u'in-cycle', u'in-dict', u'in-dict-keys', u'in-dict-pairs', u'in-dict-values', u'in-directory', u'in-hash', u'in-hash-keys', u'in-hash-pairs', u'in-hash-values', u'in-immutable-hash', u'in-immutable-hash-keys', u'in-immutable-hash-pairs', u'in-immutable-hash-values', u'in-immutable-set', u'in-indexed', u'in-input-port-bytes', u'in-input-port-chars', u'in-lines', u'in-list', u'in-mlist', u'in-mutable-hash', u'in-mutable-hash-keys', u'in-mutable-hash-pairs', u'in-mutable-hash-values', u'in-mutable-set', u'in-naturals', u'in-parallel', u'in-permutations', u'in-port', u'in-producer', u'in-range', u'in-sequences', u'in-set', u'in-slice', u'in-stream', u'in-string', u'in-syntax', u'in-value', u'in-values*-sequence', u'in-values-sequence', u'in-vector', u'in-weak-hash', u'in-weak-hash-keys', u'in-weak-hash-pairs', u'in-weak-hash-values', u'in-weak-set', u'inexact->exact', u'inexact-real?', u'inexact?', u'infinite?', u'input-port-append', u'input-port?', u'inspector?', u'instanceof/c', u'integer->char', u'integer->integer-bytes', u'integer-bytes->integer', u'integer-in', u'integer-length', u'integer-sqrt', u'integer-sqrt/remainder', u'integer?', u'interface->method-names', u'interface-extension?', u'interface?', u'internal-definition-context-binding-identifiers', u'internal-definition-context-introduce', u'internal-definition-context-seal', u'internal-definition-context?', u'is-a?', u'is-a?/c', u'keyword->string', u'keyword-apply', u'keyword<?', u'keyword?', u'keywords-match', u'kill-thread', u'last', u'last-pair', u'lcm', u'length', u'liberal-define-context?', u'link-exists?', u'list', u'list*', u'list*of', 
u'list->bytes', u'list->mutable-set', u'list->mutable-seteq', u'list->mutable-seteqv', u'list->set', u'list->seteq', u'list->seteqv', u'list->string', u'list->vector', u'list->weak-set', u'list->weak-seteq', u'list->weak-seteqv', u'list-contract?', u'list-prefix?', u'list-ref', u'list-set', u'list-tail', u'list-update', u'list/c', u'list?', u'listen-port-number?', u'listof', u'load', u'load-extension', u'load-on-demand-enabled', u'load-relative', u'load-relative-extension', u'load/cd', u'load/use-compiled', u'local-expand', u'local-expand/capture-lifts', u'local-transformer-expand', u'local-transformer-expand/capture-lifts', u'locale-string-encoding', u'log', u'log-all-levels', u'log-level-evt', u'log-level?', u'log-max-level', u'log-message', u'log-receiver?', u'logger-name', u'logger?', u'magnitude', u'make-arity-at-least', u'make-base-empty-namespace', u'make-base-namespace', u'make-bytes', u'make-channel', u'make-chaperone-contract', u'make-continuation-mark-key', u'make-continuation-prompt-tag', u'make-contract', u'make-custodian', u'make-custodian-box', u'make-custom-hash', u'make-custom-hash-types', u'make-custom-set', u'make-custom-set-types', u'make-date', u'make-date*', u'make-derived-parameter', u'make-directory', u'make-directory*', u'make-do-sequence', u'make-empty-namespace', u'make-environment-variables', u'make-ephemeron', u'make-exn', u'make-exn:break', u'make-exn:break:hang-up', u'make-exn:break:terminate', u'make-exn:fail', u'make-exn:fail:contract', u'make-exn:fail:contract:arity', u'make-exn:fail:contract:blame', u'make-exn:fail:contract:continuation', u'make-exn:fail:contract:divide-by-zero', u'make-exn:fail:contract:non-fixnum-result', u'make-exn:fail:contract:variable', u'make-exn:fail:filesystem', u'make-exn:fail:filesystem:errno', u'make-exn:fail:filesystem:exists', u'make-exn:fail:filesystem:missing-module', u'make-exn:fail:filesystem:version', u'make-exn:fail:network', u'make-exn:fail:network:errno', u'make-exn:fail:object', u'make-exn:fail:out-of-memory', u'make-exn:fail:read', u'make-exn:fail:read:eof', u'make-exn:fail:read:non-char', u'make-exn:fail:syntax', u'make-exn:fail:syntax:missing-module', u'make-exn:fail:syntax:unbound', u'make-exn:fail:unsupported', u'make-exn:fail:user', u'make-file-or-directory-link', u'make-flat-contract', u'make-fsemaphore', u'make-generic', u'make-handle-get-preference-locked', u'make-hash', u'make-hash-placeholder', u'make-hasheq', u'make-hasheq-placeholder', u'make-hasheqv', u'make-hasheqv-placeholder', u'make-immutable-custom-hash', u'make-immutable-hash', u'make-immutable-hasheq', u'make-immutable-hasheqv', u'make-impersonator-property', u'make-input-port', u'make-input-port/read-to-peek', u'make-inspector', u'make-keyword-procedure', u'make-known-char-range-list', u'make-limited-input-port', u'make-list', u'make-lock-file-name', u'make-log-receiver', u'make-logger', u'make-mixin-contract', u'make-mutable-custom-set', u'make-none/c', u'make-object', u'make-output-port', u'make-parameter', u'make-parent-directory*', u'make-phantom-bytes', u'make-pipe', u'make-pipe-with-specials', u'make-placeholder', u'make-plumber', u'make-polar', u'make-prefab-struct', u'make-primitive-class', u'make-proj-contract', u'make-pseudo-random-generator', u'make-reader-graph', u'make-readtable', u'make-rectangular', u'make-rename-transformer', u'make-resolved-module-path', u'make-security-guard', u'make-semaphore', u'make-set!-transformer', u'make-shared-bytes', u'make-sibling-inspector', u'make-special-comment', u'make-srcloc', u'make-string', 
u'make-struct-field-accessor', u'make-struct-field-mutator', u'make-struct-type', u'make-struct-type-property', u'make-syntax-delta-introducer', u'make-syntax-introducer', u'make-temporary-file', u'make-tentative-pretty-print-output-port', u'make-thread-cell', u'make-thread-group', u'make-vector', u'make-weak-box', u'make-weak-custom-hash', u'make-weak-custom-set', u'make-weak-hash', u'make-weak-hasheq', u'make-weak-hasheqv', u'make-will-executor', u'map', u'match-equality-test', u'matches-arity-exactly?', u'max', u'mcar', u'mcdr', u'mcons', u'member', u'member-name-key-hash-code', u'member-name-key=?', u'member-name-key?', u'memf', u'memq', u'memv', u'merge-input', u'method-in-interface?', u'min', u'mixin-contract', u'module->exports', u'module->imports', u'module->language-info', u'module->namespace', u'module-compiled-cross-phase-persistent?', u'module-compiled-exports', u'module-compiled-imports', u'module-compiled-language-info', u'module-compiled-name', u'module-compiled-submodules', u'module-declared?', u'module-path-index-join', u'module-path-index-resolve', u'module-path-index-split', u'module-path-index-submodule', u'module-path-index?', u'module-path?', u'module-predefined?', u'module-provide-protected?', u'modulo', u'mpair?', u'mutable-set', u'mutable-seteq', u'mutable-seteqv', u'n->th', u'nack-guard-evt', u'namespace-anchor->empty-namespace', u'namespace-anchor->namespace', u'namespace-anchor?', u'namespace-attach-module', u'namespace-attach-module-declaration', u'namespace-base-phase', u'namespace-mapped-symbols', u'namespace-module-identifier', u'namespace-module-registry', u'namespace-require', u'namespace-require/constant', u'namespace-require/copy', u'namespace-require/expansion-time', u'namespace-set-variable-value!', u'namespace-symbol->identifier', u'namespace-syntax-introduce', u'namespace-undefine-variable!', u'namespace-unprotect-module', u'namespace-variable-value', u'namespace?', u'nan?', u'natural-number/c', u'negate', u'negative?', u'never-evt', u'new-∀/c', u'new-∃/c', u'newline', u'ninth', u'non-empty-listof', u'non-empty-string?', u'none/c', u'normal-case-path', u'normalize-arity', u'normalize-path', u'normalized-arity?', u'not', u'not/c', u'null', u'null?', u'number->string', u'number?', u'numerator', u'object%', u'object->vector', u'object-info', u'object-interface', u'object-method-arity-includes?', u'object-name', u'object-or-false=?', u'object=?', u'object?', u'odd?', u'one-of/c', u'open-input-bytes', u'open-input-file', u'open-input-output-file', u'open-input-string', u'open-output-bytes', u'open-output-file', u'open-output-nowhere', u'open-output-string', u'or/c', u'order-of-magnitude', u'ormap', u'other-execute-bit', u'other-read-bit', u'other-write-bit', u'output-port?', u'pair?', u'parameter-procedure=?', u'parameter/c', u'parameter?', u'parameterization?', u'parse-command-line', u'partition', u'path->bytes', u'path->complete-path', u'path->directory-path', u'path->string', u'path-add-suffix', u'path-convention-type', u'path-element->bytes', u'path-element->string', u'path-element?', u'path-for-some-system?', u'path-list-string->path-list', u'path-only', u'path-replace-suffix', u'path-string?', u'path<?', u'path?', u'pathlist-closure', u'peek-byte', u'peek-byte-or-special', u'peek-bytes', u'peek-bytes!', u'peek-bytes!-evt', u'peek-bytes-avail!', u'peek-bytes-avail!*', u'peek-bytes-avail!-evt', u'peek-bytes-avail!/enable-break', u'peek-bytes-evt', u'peek-char', u'peek-char-or-special', u'peek-string', u'peek-string!', u'peek-string!-evt', 
u'peek-string-evt', u'peeking-input-port', u'permutations', u'phantom-bytes?', u'pi', u'pi.f', u'pipe-content-length', u'place-break', u'place-channel', u'place-channel-get', u'place-channel-put', u'place-channel-put/get', u'place-channel?', u'place-dead-evt', u'place-enabled?', u'place-kill', u'place-location?', u'place-message-allowed?', u'place-sleep', u'place-wait', u'place?', u'placeholder-get', u'placeholder-set!', u'placeholder?', u'plumber-add-flush!', u'plumber-flush-all', u'plumber-flush-handle-remove!', u'plumber-flush-handle?', u'plumber?', u'poll-guard-evt', u'port->bytes', u'port->bytes-lines', u'port->lines', u'port->list', u'port->string', u'port-closed-evt', u'port-closed?', u'port-commit-peeked', u'port-count-lines!', u'port-count-lines-enabled', u'port-counts-lines?', u'port-display-handler', u'port-file-identity', u'port-file-unlock', u'port-next-location', u'port-number?', u'port-print-handler', u'port-progress-evt', u'port-provides-progress-evts?', u'port-read-handler', u'port-try-file-lock?', u'port-write-handler', u'port-writes-atomic?', u'port-writes-special?', u'port?', u'positive?', u'predicate/c', u'prefab-key->struct-type', u'prefab-key?', u'prefab-struct-key', u'preferences-lock-file-mode', u'pregexp', u'pregexp?', u'pretty-display', u'pretty-format', u'pretty-print', u'pretty-print-.-symbol-without-bars', u'pretty-print-abbreviate-read-macros', u'pretty-print-columns', u'pretty-print-current-style-table', u'pretty-print-depth', u'pretty-print-exact-as-decimal', u'pretty-print-extend-style-table', u'pretty-print-handler', u'pretty-print-newline', u'pretty-print-post-print-hook', u'pretty-print-pre-print-hook', u'pretty-print-print-hook', u'pretty-print-print-line', u'pretty-print-remap-stylable', u'pretty-print-show-inexactness', u'pretty-print-size-hook', u'pretty-print-style-table?', u'pretty-printing', u'pretty-write', u'primitive-closure?', u'primitive-result-arity', u'primitive?', u'print', u'print-as-expression', u'print-boolean-long-form', u'print-box', u'print-graph', u'print-hash-table', u'print-mpair-curly-braces', u'print-pair-curly-braces', u'print-reader-abbreviations', u'print-struct', u'print-syntax-width', u'print-unreadable', u'print-vector-length', u'printable/c', u'printable<%>', u'printf', u'println', u'procedure->method', u'procedure-arity', u'procedure-arity-includes/c', u'procedure-arity-includes?', u'procedure-arity?', u'procedure-closure-contents-eq?', u'procedure-extract-target', u'procedure-keywords', u'procedure-reduce-arity', u'procedure-reduce-keyword-arity', u'procedure-rename', u'procedure-result-arity', u'procedure-specialize', u'procedure-struct-type?', u'procedure?', u'process', u'process*', u'process*/ports', u'process/ports', u'processor-count', u'progress-evt?', u'promise-forced?', u'promise-running?', u'promise/c', u'promise/name?', u'promise?', u'prop:arity-string', u'prop:arrow-contract', u'prop:arrow-contract-get-info', u'prop:arrow-contract?', u'prop:blame', u'prop:chaperone-contract', u'prop:checked-procedure', u'prop:contract', u'prop:contracted', u'prop:custom-print-quotable', u'prop:custom-write', u'prop:dict', u'prop:dict/contract', u'prop:equal+hash', u'prop:evt', u'prop:exn:missing-module', u'prop:exn:srclocs', u'prop:expansion-contexts', u'prop:flat-contract', u'prop:impersonator-of', u'prop:input-port', u'prop:liberal-define-context', u'prop:object-name', u'prop:opt-chaperone-contract', u'prop:opt-chaperone-contract-get-test', u'prop:opt-chaperone-contract?', u'prop:orc-contract', 
u'prop:orc-contract-get-subcontracts', u'prop:orc-contract?', u'prop:output-port', u'prop:place-location', u'prop:procedure', u'prop:recursive-contract', u'prop:recursive-contract-unroll', u'prop:recursive-contract?', u'prop:rename-transformer', u'prop:sequence', u'prop:set!-transformer', u'prop:stream', u'proper-subset?', u'pseudo-random-generator->vector', u'pseudo-random-generator-vector?', u'pseudo-random-generator?', u'put-preferences', u'putenv', u'quotient', u'quotient/remainder', u'radians->degrees', u'raise', u'raise-argument-error', u'raise-arguments-error', u'raise-arity-error', u'raise-blame-error', u'raise-contract-error', u'raise-mismatch-error', u'raise-not-cons-blame-error', u'raise-range-error', u'raise-result-error', u'raise-syntax-error', u'raise-type-error', u'raise-user-error', u'random', u'random-seed', u'range', u'rational?', u'rationalize', u'read', u'read-accept-bar-quote', u'read-accept-box', u'read-accept-compiled', u'read-accept-dot', u'read-accept-graph', u'read-accept-infix-dot', u'read-accept-lang', u'read-accept-quasiquote', u'read-accept-reader', u'read-byte', u'read-byte-or-special', u'read-bytes', u'read-bytes!', u'read-bytes!-evt', u'read-bytes-avail!', u'read-bytes-avail!*', u'read-bytes-avail!-evt', u'read-bytes-avail!/enable-break', u'read-bytes-evt', u'read-bytes-line', u'read-bytes-line-evt', u'read-case-sensitive', u'read-cdot', u'read-char', u'read-char-or-special', u'read-curly-brace-as-paren', u'read-curly-brace-with-tag', u'read-decimal-as-inexact', u'read-eval-print-loop', u'read-language', u'read-line', u'read-line-evt', u'read-on-demand-source', u'read-square-bracket-as-paren', u'read-square-bracket-with-tag', u'read-string', u'read-string!', u'read-string!-evt', u'read-string-evt', u'read-syntax', u'read-syntax/recursive', u'read/recursive', u'readtable-mapping', u'readtable?', u'real->decimal-string', u'real->double-flonum', u'real->floating-point-bytes', u'real->single-flonum', u'real-in', u'real-part', u'real?', u'reencode-input-port', u'reencode-output-port', u'regexp', u'regexp-match', u'regexp-match*', u'regexp-match-evt', u'regexp-match-exact?', u'regexp-match-peek', u'regexp-match-peek-immediate', u'regexp-match-peek-positions', u'regexp-match-peek-positions*', u'regexp-match-peek-positions-immediate', u'regexp-match-peek-positions-immediate/end', u'regexp-match-peek-positions/end', u'regexp-match-positions', u'regexp-match-positions*', u'regexp-match-positions/end', u'regexp-match/end', u'regexp-match?', u'regexp-max-lookbehind', u'regexp-quote', u'regexp-replace', u'regexp-replace*', u'regexp-replace-quote', u'regexp-replaces', u'regexp-split', u'regexp-try-match', u'regexp?', u'relative-path?', u'relocate-input-port', u'relocate-output-port', u'remainder', u'remf', u'remf*', u'remove', u'remove*', u'remove-duplicates', u'remq', u'remq*', u'remv', u'remv*', u'rename-contract', u'rename-file-or-directory', u'rename-transformer-target', u'rename-transformer?', u'replace-evt', u'reroot-path', u'resolve-path', u'resolved-module-path-name', u'resolved-module-path?', u'rest', u'reverse', u'round', u'second', u'seconds->date', u'security-guard?', u'semaphore-peek-evt', u'semaphore-peek-evt?', u'semaphore-post', u'semaphore-try-wait?', u'semaphore-wait', u'semaphore-wait/enable-break', u'semaphore?', u'sequence->list', u'sequence->stream', u'sequence-add-between', u'sequence-andmap', u'sequence-append', u'sequence-count', u'sequence-filter', u'sequence-fold', u'sequence-for-each', u'sequence-generate', u'sequence-generate*', 
u'sequence-length', u'sequence-map', u'sequence-ormap', u'sequence-ref', u'sequence-tail', u'sequence/c', u'sequence?', u'set', u'set!-transformer-procedure', u'set!-transformer?', u'set->list', u'set->stream', u'set-add', u'set-add!', u'set-box!', u'set-clear', u'set-clear!', u'set-copy', u'set-copy-clear', u'set-count', u'set-empty?', u'set-eq?', u'set-equal?', u'set-eqv?', u'set-first', u'set-for-each', u'set-implements/c', u'set-implements?', u'set-intersect', u'set-intersect!', u'set-map', u'set-mcar!', u'set-mcdr!', u'set-member?', u'set-mutable?', u'set-phantom-bytes!', u'set-port-next-location!', u'set-remove', u'set-remove!', u'set-rest', u'set-some-basic-contracts!', u'set-subtract', u'set-subtract!', u'set-symmetric-difference', u'set-symmetric-difference!', u'set-union', u'set-union!', u'set-weak?', u'set/c', u'set=?', u'set?', u'seteq', u'seteqv', u'seventh', u'sgn', u'shared-bytes', u'shell-execute', u'shrink-path-wrt', u'shuffle', u'simple-form-path', u'simplify-path', u'sin', u'single-flonum?', u'sinh', u'sixth', u'skip-projection-wrapper?', u'sleep', u'some-system-path->string', u'sort', u'special-comment-value', u'special-comment?', u'special-filter-input-port', u'split-at', u'split-at-right', u'split-common-prefix', u'split-path', u'splitf-at', u'splitf-at-right', u'sqr', u'sqrt', u'srcloc', u'srcloc->string', u'srcloc-column', u'srcloc-line', u'srcloc-position', u'srcloc-source', u'srcloc-span', u'srcloc?', u'stop-after', u'stop-before', u'stream->list', u'stream-add-between', u'stream-andmap', u'stream-append', u'stream-count', u'stream-empty?', u'stream-filter', u'stream-first', u'stream-fold', u'stream-for-each', u'stream-length', u'stream-map', u'stream-ormap', u'stream-ref', u'stream-rest', u'stream-tail', u'stream/c', u'stream?', u'string', u'string->bytes/latin-1', u'string->bytes/locale', u'string->bytes/utf-8', u'string->immutable-string', u'string->keyword', u'string->list', u'string->number', u'string->path', u'string->path-element', u'string->some-system-path', u'string->symbol', u'string->uninterned-symbol', u'string->unreadable-symbol', u'string-append', u'string-append*', u'string-ci<=?', u'string-ci<?', u'string-ci=?', u'string-ci>=?', u'string-ci>?', u'string-contains?', u'string-copy', u'string-copy!', u'string-downcase', u'string-environment-variable-name?', u'string-fill!', u'string-foldcase', u'string-join', u'string-len/c', u'string-length', u'string-locale-ci<?', u'string-locale-ci=?', u'string-locale-ci>?', u'string-locale-downcase', u'string-locale-upcase', u'string-locale<?', u'string-locale=?', u'string-locale>?', u'string-no-nuls?', u'string-normalize-nfc', u'string-normalize-nfd', u'string-normalize-nfkc', u'string-normalize-nfkd', u'string-normalize-spaces', u'string-port?', u'string-prefix?', u'string-ref', u'string-replace', u'string-set!', u'string-split', u'string-suffix?', u'string-titlecase', u'string-trim', u'string-upcase', u'string-utf-8-length', u'string<=?', u'string<?', u'string=?', u'string>=?', u'string>?', u'string?', u'struct->vector', u'struct-accessor-procedure?', u'struct-constructor-procedure?', u'struct-info', u'struct-mutator-procedure?', u'struct-predicate-procedure?', u'struct-type-info', u'struct-type-make-constructor', u'struct-type-make-predicate', u'struct-type-property-accessor-procedure?', u'struct-type-property/c', u'struct-type-property?', u'struct-type?', u'struct:arity-at-least', u'struct:arrow-contract-info', u'struct:date', u'struct:date*', u'struct:exn', u'struct:exn:break', u'struct:exn:break:hang-up', 
u'struct:exn:break:terminate', u'struct:exn:fail', u'struct:exn:fail:contract', u'struct:exn:fail:contract:arity', u'struct:exn:fail:contract:blame', u'struct:exn:fail:contract:continuation', u'struct:exn:fail:contract:divide-by-zero', u'struct:exn:fail:contract:non-fixnum-result', u'struct:exn:fail:contract:variable', u'struct:exn:fail:filesystem', u'struct:exn:fail:filesystem:errno', u'struct:exn:fail:filesystem:exists', u'struct:exn:fail:filesystem:missing-module', u'struct:exn:fail:filesystem:version', u'struct:exn:fail:network', u'struct:exn:fail:network:errno', u'struct:exn:fail:object', u'struct:exn:fail:out-of-memory', u'struct:exn:fail:read', u'struct:exn:fail:read:eof', u'struct:exn:fail:read:non-char', u'struct:exn:fail:syntax', u'struct:exn:fail:syntax:missing-module', u'struct:exn:fail:syntax:unbound', u'struct:exn:fail:unsupported', u'struct:exn:fail:user', u'struct:srcloc', u'struct:wrapped-extra-arg-arrow', u'struct?', u'sub1', u'subbytes', u'subclass?', u'subclass?/c', u'subprocess', u'subprocess-group-enabled', u'subprocess-kill', u'subprocess-pid', u'subprocess-status', u'subprocess-wait', u'subprocess?', u'subset?', u'substring', u'suggest/c', u'symbol->string', u'symbol-interned?', u'symbol-unreadable?', u'symbol<?', u'symbol=?', u'symbol?', u'symbols', u'sync', u'sync/enable-break', u'sync/timeout', u'sync/timeout/enable-break', u'syntax->datum', u'syntax->list', u'syntax-arm', u'syntax-column', u'syntax-debug-info', u'syntax-disarm', u'syntax-e', u'syntax-line', u'syntax-local-bind-syntaxes', u'syntax-local-certifier', u'syntax-local-context', u'syntax-local-expand-expression', u'syntax-local-get-shadower', u'syntax-local-identifier-as-binding', u'syntax-local-introduce', u'syntax-local-lift-context', u'syntax-local-lift-expression', u'syntax-local-lift-module', u'syntax-local-lift-module-end-declaration', u'syntax-local-lift-provide', u'syntax-local-lift-require', u'syntax-local-lift-values-expression', u'syntax-local-make-definition-context', u'syntax-local-make-delta-introducer', u'syntax-local-module-defined-identifiers', u'syntax-local-module-exports', u'syntax-local-module-required-identifiers', u'syntax-local-name', u'syntax-local-phase-level', u'syntax-local-submodules', u'syntax-local-transforming-module-provides?', u'syntax-local-value', u'syntax-local-value/immediate', u'syntax-original?', u'syntax-position', u'syntax-property', u'syntax-property-preserved?', u'syntax-property-symbol-keys', u'syntax-protect', u'syntax-rearm', u'syntax-recertify', u'syntax-shift-phase-level', u'syntax-source', u'syntax-source-module', u'syntax-span', u'syntax-taint', u'syntax-tainted?', u'syntax-track-origin', u'syntax-transforming-module-expression?', u'syntax-transforming-with-lifts?', u'syntax-transforming?', u'syntax/c', u'syntax?', u'system', u'system*', u'system*/exit-code', u'system-big-endian?', u'system-idle-evt', u'system-language+country', u'system-library-subpath', u'system-path-convention-type', u'system-type', u'system/exit-code', u'tail-marks-match?', u'take', u'take-common-prefix', u'take-right', u'takef', u'takef-right', u'tan', u'tanh', u'tcp-abandon-port', u'tcp-accept', u'tcp-accept-evt', u'tcp-accept-ready?', u'tcp-accept/enable-break', u'tcp-addresses', u'tcp-close', u'tcp-connect', u'tcp-connect/enable-break', u'tcp-listen', u'tcp-listener?', u'tcp-port?', u'tentative-pretty-print-port-cancel', u'tentative-pretty-print-port-transfer', u'tenth', u'terminal-port?', u'the-unsupplied-arg', u'third', u'thread', u'thread-cell-ref', u'thread-cell-set!', 
        u'thread-cell-values?', u'thread-cell?', u'thread-dead-evt', u'thread-dead?', u'thread-group?', u'thread-receive', u'thread-receive-evt', u'thread-resume', u'thread-resume-evt', u'thread-rewind-receive', u'thread-running?', u'thread-send', u'thread-suspend', u'thread-suspend-evt', u'thread-try-receive', u'thread-wait', u'thread/suspend-to-kill', u'thread?', u'time-apply', u'touch', u'transplant-input-port', u'transplant-output-port', u'true', u'truncate',
        u'udp-addresses', u'udp-bind!', u'udp-bound?', u'udp-close', u'udp-connect!', u'udp-connected?', u'udp-multicast-interface', u'udp-multicast-join-group!', u'udp-multicast-leave-group!', u'udp-multicast-loopback?', u'udp-multicast-set-interface!', u'udp-multicast-set-loopback!', u'udp-multicast-set-ttl!', u'udp-multicast-ttl', u'udp-open-socket', u'udp-receive!', u'udp-receive!*', u'udp-receive!-evt', u'udp-receive!/enable-break', u'udp-receive-ready-evt', u'udp-send', u'udp-send*', u'udp-send-evt', u'udp-send-ready-evt', u'udp-send-to', u'udp-send-to*', u'udp-send-to-evt', u'udp-send-to/enable-break', u'udp-send/enable-break', u'udp?', u'unbox', u'uncaught-exception-handler', u'unit?', u'unspecified-dom', u'unsupplied-arg?', u'use-collection-link-paths', u'use-compiled-file-paths', u'use-user-specific-search-paths', u'user-execute-bit', u'user-read-bit', u'user-write-bit', u'value-blame', u'value-contract', u'values',
        u'variable-reference->empty-namespace', u'variable-reference->module-base-phase', u'variable-reference->module-declaration-inspector', u'variable-reference->module-path-index', u'variable-reference->module-source', u'variable-reference->namespace', u'variable-reference->phase', u'variable-reference->resolved-module-path', u'variable-reference-constant?', u'variable-reference?', u'vector', u'vector->immutable-vector', u'vector->list', u'vector->pseudo-random-generator', u'vector->pseudo-random-generator!', u'vector->values', u'vector-append', u'vector-argmax', u'vector-argmin', u'vector-copy', u'vector-copy!', u'vector-count', u'vector-drop', u'vector-drop-right', u'vector-fill!', u'vector-filter', u'vector-filter-not', u'vector-immutable', u'vector-immutable/c', u'vector-immutableof', u'vector-length', u'vector-map', u'vector-map!', u'vector-member', u'vector-memq', u'vector-memv', u'vector-ref', u'vector-set!', u'vector-set*!', u'vector-set-performance-stats!', u'vector-split-at', u'vector-split-at-right', u'vector-take', u'vector-take-right', u'vector/c', u'vector?', u'vectorof', u'version', u'void', u'void?',
        u'weak-box-value', u'weak-box?', u'weak-set', u'weak-seteq', u'weak-seteqv', u'will-execute', u'will-executor?', u'will-register', u'will-try-execute', u'with-input-from-bytes', u'with-input-from-file', u'with-input-from-string', u'with-output-to-bytes', u'with-output-to-file', u'with-output-to-string', u'would-be-future', u'wrap-evt', u'wrapped-extra-arg-arrow', u'wrapped-extra-arg-arrow-extra-neg-party-argument', u'wrapped-extra-arg-arrow-real-func', u'wrapped-extra-arg-arrow?', u'writable<%>', u'write', u'write-byte', u'write-bytes', u'write-bytes-avail', u'write-bytes-avail*', u'write-bytes-avail-evt', u'write-bytes-avail/enable-break', u'write-char', u'write-special', u'write-special-avail*', u'write-special-evt', u'write-string', u'write-to-file', u'writeln', u'xor', u'zero?', u'~.a', u'~.s', u'~.v', u'~a', u'~e', u'~r', u'~s', u'~v'
    )

    _opening_parenthesis = r'[([{]'
    _closing_parenthesis = r'[)\]}]'
    _delimiters = r'()[\]{}",\'`;\s'
    _symbol = r'(?u)(?:\|[^|]*\||\\[\w\W]|[^|\\%s]+)+' % _delimiters
    _exact_decimal_prefix = r'(?:#e)?(?:#d)?(?:#e)?'
    _exponent = r'(?:[defls][-+]?\d+)'
    _inexact_simple_no_hashes = r'(?:\d+(?:/\d+|\.\d*)?|\.\d+)'
    _inexact_simple = (r'(?:%s|(?:\d+#+(?:\.#*|/\d+#*)?|\.\d+#+|'
                       r'\d+(?:\.\d*#+|/\d+#+)))' % _inexact_simple_no_hashes)
    _inexact_normal_no_hashes = r'(?:%s%s?)' % (_inexact_simple_no_hashes,
                                                _exponent)
    _inexact_normal = r'(?:%s%s?)' % (_inexact_simple, _exponent)
    _inexact_special = r'(?:(?:inf|nan)\.[0f])'
    _inexact_real = r'(?:[-+]?%s|[-+]%s)' % (_inexact_normal, _inexact_special)
    _inexact_unsigned = r'(?:%s|%s)' % (_inexact_normal, _inexact_special)

    tokens = {
        'root': [
            (_closing_parenthesis, Error),
            (r'(?!\Z)', Text, 'unquoted-datum')
        ],
        'datum': [
            (r'(?s)#;|#![ /]([^\\\n]|\\.)*', Comment),
            (u';[^\\n\\r\x85\u2028\u2029]*', Comment.Single),
            (r'#\|', Comment.Multiline, 'block-comment'),

            # Whitespaces
            (r'(?u)\s+', Text),

            # Numbers: Keep in mind Racket reader hash prefixes, which
            # can denote the base or the type. These don't map neatly
            # onto Pygments token types; some judgment calls here.

            # #d or no prefix
            (r'(?i)%s[-+]?\d+(?=[%s])' % (_exact_decimal_prefix, _delimiters),
             Number.Integer, '#pop'),
            (r'(?i)%s[-+]?(\d+(\.\d*)?|\.\d+)([deflst][-+]?\d+)?(?=[%s])' %
             (_exact_decimal_prefix, _delimiters), Number.Float, '#pop'),
            (r'(?i)%s[-+]?(%s([-+]%s?i)?|[-+]%s?i)(?=[%s])' %
             (_exact_decimal_prefix, _inexact_normal_no_hashes,
              _inexact_normal_no_hashes, _inexact_normal_no_hashes,
              _delimiters), Number, '#pop'),

            # Inexact without explicit #i
            (r'(?i)(#d)?(%s([-+]%s?i)?|[-+]%s?i|%s@%s)(?=[%s])' %
             (_inexact_real, _inexact_unsigned, _inexact_unsigned,
              _inexact_real, _inexact_real, _delimiters), Number.Float,
             '#pop'),

            # The remaining extflonums
            (r'(?i)(([-+]?%st[-+]?\d+)|[-+](inf|nan)\.t)(?=[%s])' %
             (_inexact_simple, _delimiters), Number.Float, '#pop'),

            # #b
            (r'(?i)(#[ei])?#b%s' % _symbol, Number.Bin, '#pop'),

            # #o
            (r'(?i)(#[ei])?#o%s' % _symbol, Number.Oct, '#pop'),

            # #x
            (r'(?i)(#[ei])?#x%s' % _symbol, Number.Hex, '#pop'),

            # #i is always inexact, i.e. float
            (r'(?i)(#d)?#i%s' % _symbol, Number.Float, '#pop'),

            # Strings and characters
            (r'#?"', String.Double, ('#pop', 'string')),
            (r'#<<(.+)\n(^(?!\1$).*$\n)*^\1$', String.Heredoc, '#pop'),
            (r'#\\(u[\da-fA-F]{1,4}|U[\da-fA-F]{1,8})', String.Char, '#pop'),
            (r'(?is)#\\([0-7]{3}|[a-z]+|.)', String.Char, '#pop'),
            (r'(?s)#[pr]x#?"(\\?.)*?"', String.Regex, '#pop'),

            # Constants
            (r'#(true|false|[tTfF])', Name.Constant, '#pop'),

            # Keyword argument names (e.g. #:keyword)
            (r'#:%s' % _symbol, Keyword.Declaration, '#pop'),

            # Reader extensions
            (r'(#lang |#!)(\S+)', bygroups(Keyword.Namespace, Name.Namespace)),
            (r'#reader', Keyword.Namespace, 'quoted-datum'),

            # Other syntax
            (r"(?i)\.(?=[%s])|#c[is]|#['`]|#,@?" % _delimiters, Operator),
            (r"'|#[s&]|#hash(eqv?)?|#\d*(?=%s)" % _opening_parenthesis,
             Operator, ('#pop', 'quoted-datum'))
        ],
        'datum*': [
            (r'`|,@?', Operator),
            (_symbol, String.Symbol, '#pop'),
            (r'[|\\]', Error),
            default('#pop')
        ],
        'list': [
            (_closing_parenthesis, Punctuation, '#pop')
        ],
        'unquoted-datum': [
            include('datum'),
            (r'quote(?=[%s])' % _delimiters, Keyword,
             ('#pop', 'quoted-datum')),
            (r'`', Operator, ('#pop', 'quasiquoted-datum')),
            (r'quasiquote(?=[%s])' % _delimiters, Keyword,
             ('#pop', 'quasiquoted-datum')),
            (_opening_parenthesis, Punctuation, ('#pop', 'unquoted-list')),
            (words(_keywords, prefix='(?u)', suffix='(?=[%s])' % _delimiters),
             Keyword, '#pop'),
            (words(_builtins, prefix='(?u)', suffix='(?=[%s])' % _delimiters),
             Name.Builtin, '#pop'),
            (_symbol, Name, '#pop'),
            include('datum*')
        ],
        'unquoted-list': [
            include('list'),
            (r'(?!\Z)', Text, 'unquoted-datum')
        ],
        'quasiquoted-datum': [
            include('datum'),
            (r',@?', Operator, ('#pop', 'unquoted-datum')),
            (r'unquote(-splicing)?(?=[%s])' % _delimiters, Keyword,
             ('#pop', 'unquoted-datum')),
            (_opening_parenthesis, Punctuation, ('#pop', 'quasiquoted-list')),
            include('datum*')
        ],
        'quasiquoted-list': [
            include('list'),
            (r'(?!\Z)', Text, 'quasiquoted-datum')
        ],
        'quoted-datum': [
            include('datum'),
            (_opening_parenthesis, Punctuation, ('#pop', 'quoted-list')),
            include('datum*')
        ],
        'quoted-list': [
            include('list'),
            (r'(?!\Z)', Text, 'quoted-datum')
        ],
        'block-comment': [
            (r'#\|', Comment.Multiline, '#push'),
            (r'\|#', Comment.Multiline, '#pop'),
            (r'[^#|]+|.', Comment.Multiline)
        ],
        'string': [
            (r'"', String.Double, '#pop'),
            (r'(?s)\\([0-7]{1,3}|x[\da-fA-F]{1,2}|u[\da-fA-F]{1,4}|'
             r'U[\da-fA-F]{1,8}|.)', String.Escape),
            (r'[^\\"]+', String.Double)
        ]
    }
RacketLexer
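A minimal usage sketch for a lexer like the one above, assuming Pygments is installed and exposes it as pygments.lexers.RacketLexer:

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import RacketLexer  # assumed import path

code = "#lang racket\n(define (square x) (* x x))\n"
# The lexer's `tokens` state machine classifies each datum (keywords,
# builtins, numbers, strings) before the formatter renders it.
print(highlight(code, RacketLexer(), TerminalFormatter()))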
python
psf__black
tests/data/cases/ignore_pyi.py
{ "start": 190, "end": 315 }
class ____:
    ...

def g():
    # hi
    ...

def h():
    ...
    # bye

# output

def f():  # type: ignore
    ...
z
python
pytorch__pytorch
test/inductor/test_aot_inductor_custom_ops.py
{ "start": 15445, "end": 16202 }
class ____(LoggingTestCase):
    @make_logging_test(dynamic=logging.DEBUG)
    def test_shape_env_reuse(self, records):
        # make sure ShapeEnv is only created once and reused afterwards
        class Foo(torch.nn.Module):
            def forward(self, x):
                return x + 2

        inputs = (torch.randn(4, 4),)
        dynamic_shapes = {
            "x": {0: Dim.AUTO, 1: Dim.AUTO},
        }
        ep = export(Foo(), inputs, dynamic_shapes=dynamic_shapes, strict=False)
        with torch.no_grad():
            torch._inductor.aot_compile(ep.module(), inputs)
        self.assertEqual([r.msg == "create_env" for r in records].count(True), 1)


common_utils.instantiate_parametrized_tests(AOTInductorTestsTemplate)
AOTInductorLoggingTest
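A minimal sketch of the export-then-AOT-compile flow this test exercises, assuming a PyTorch build with torch.export and torch._inductor.aot_compile; the module name here is illustrative:

import torch
from torch.export import Dim, export

class Add2(torch.nn.Module):  # hypothetical stand-in module
    def forward(self, x):
        return x + 2

inputs = (torch.randn(4, 4),)
# Dim.AUTO asks export to infer which dimensions may vary at runtime.
ep = export(Add2(), inputs, dynamic_shapes={"x": {0: Dim.AUTO, 1: Dim.AUTO}}, strict=False)
with torch.no_grad():
    # Compiles ahead of time; per the test above, a single ShapeEnv
    # should be created and reused across this pipeline.
    torch._inductor.aot_compile(ep.module(), inputs)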
python
tensorflow__tensorflow
tensorflow/python/debug/cli/command_parser_test.py
{ "start": 15941, "end": 21677 }
class ____(test_util.TensorFlowTestCase):

  def testParseTimeInterval(self):
    self.assertEqual(
        command_parser.Interval(10, True, 1e3, True),
        command_parser.parse_time_interval("[10us, 1ms]"))
    self.assertEqual(
        command_parser.Interval(10, False, 1e3, False),
        command_parser.parse_time_interval("(10us, 1ms)"))
    self.assertEqual(
        command_parser.Interval(10, False, 1e3, True),
        command_parser.parse_time_interval("(10us, 1ms]"))
    self.assertEqual(
        command_parser.Interval(10, True, 1e3, False),
        command_parser.parse_time_interval("[10us, 1ms)"))
    self.assertEqual(
        command_parser.Interval(0, False, 1e3, True),
        command_parser.parse_time_interval("<=1ms"))
    self.assertEqual(
        command_parser.Interval(1e3, True, float("inf"), False),
        command_parser.parse_time_interval(">=1ms"))
    self.assertEqual(
        command_parser.Interval(0, False, 1e3, False),
        command_parser.parse_time_interval("<1ms"))
    self.assertEqual(
        command_parser.Interval(1e3, False, float("inf"), False),
        command_parser.parse_time_interval(">1ms"))

  def testParseTimeGreaterLessThanWithInvalidValueStrings(self):
    with self.assertRaisesRegex(ValueError, "Invalid value string after >= "):
      command_parser.parse_time_interval(">=wms")
    with self.assertRaisesRegex(ValueError, "Invalid value string after > "):
      command_parser.parse_time_interval(">Yms")
    with self.assertRaisesRegex(ValueError, "Invalid value string after <= "):
      command_parser.parse_time_interval("<= _ms")
    with self.assertRaisesRegex(ValueError, "Invalid value string after < "):
      command_parser.parse_time_interval("<-ms")

  def testParseTimeIntervalsWithInvalidValueStrings(self):
    with self.assertRaisesRegex(ValueError, "Invalid first item in interval:"):
      command_parser.parse_time_interval("[wms, 10ms]")
    with self.assertRaisesRegex(ValueError,
                                "Invalid second item in interval:"):
      command_parser.parse_time_interval("[ 0ms, _ms]")
    with self.assertRaisesRegex(ValueError, "Invalid first item in interval:"):
      command_parser.parse_time_interval("(xms, _ms]")
    with self.assertRaisesRegex(ValueError, "Invalid first item in interval:"):
      command_parser.parse_time_interval("((3ms, _ms)")

  def testInvalidTimeIntervalRaisesException(self):
    with self.assertRaisesRegex(
        ValueError,
        r"Invalid interval format: \[10us, 1ms. Valid formats are: "
        r"\[min, max\], \(min, max\), <max, >min"):
      command_parser.parse_time_interval("[10us, 1ms")
    with self.assertRaisesRegex(
        ValueError,
        r"Incorrect interval format: \[10us, 1ms, 2ms\]. Interval should "
        r"specify two values: \[min, max\] or \(min, max\)"):
      command_parser.parse_time_interval("[10us, 1ms, 2ms]")
    with self.assertRaisesRegex(
        ValueError,
        r"Invalid interval \[1s, 1ms\]. Start must be before end of "
        "interval."):
      command_parser.parse_time_interval("[1s, 1ms]")

  def testParseMemoryInterval(self):
    self.assertEqual(
        command_parser.Interval(1024, True, 2048, True),
        command_parser.parse_memory_interval("[1k, 2k]"))
    self.assertEqual(
        command_parser.Interval(1024, False, 2048, False),
        command_parser.parse_memory_interval("(1kB, 2kB)"))
    self.assertEqual(
        command_parser.Interval(1024, False, 2048, True),
        command_parser.parse_memory_interval("(1k, 2k]"))
    self.assertEqual(
        command_parser.Interval(1024, True, 2048, False),
        command_parser.parse_memory_interval("[1k, 2k)"))
    self.assertEqual(
        command_parser.Interval(0, False, 2048, True),
        command_parser.parse_memory_interval("<=2k"))
    self.assertEqual(
        command_parser.Interval(11, True, float("inf"), False),
        command_parser.parse_memory_interval(">=11"))
    self.assertEqual(
        command_parser.Interval(0, False, 2048, False),
        command_parser.parse_memory_interval("<2k"))
    self.assertEqual(
        command_parser.Interval(11, False, float("inf"), False),
        command_parser.parse_memory_interval(">11"))

  def testParseMemoryIntervalsWithInvalidValueStrings(self):
    with self.assertRaisesRegex(ValueError, "Invalid value string after >= "):
      command_parser.parse_time_interval(">=wM")
    with self.assertRaisesRegex(ValueError, "Invalid value string after > "):
      command_parser.parse_time_interval(">YM")
    with self.assertRaisesRegex(ValueError, "Invalid value string after <= "):
      command_parser.parse_time_interval("<= _MB")
    with self.assertRaisesRegex(ValueError, "Invalid value string after < "):
      command_parser.parse_time_interval("<-MB")

  def testInvalidMemoryIntervalRaisesException(self):
    with self.assertRaisesRegex(
        ValueError,
        r"Invalid interval \[5k, 3k\]. Start of interval must be less than or "
        "equal to end of interval."):
      command_parser.parse_memory_interval("[5k, 3k]")

  def testIntervalContains(self):
    interval = command_parser.Interval(
        start=1, start_included=True, end=10, end_included=True)
    self.assertTrue(interval.contains(1))
    self.assertTrue(interval.contains(10))
    self.assertTrue(interval.contains(5))

    interval.start_included = False
    self.assertFalse(interval.contains(1))
    self.assertTrue(interval.contains(10))

    interval.end_included = False
    self.assertFalse(interval.contains(1))
    self.assertFalse(interval.contains(10))

    interval.start_included = True
    self.assertTrue(interval.contains(1))
    self.assertFalse(interval.contains(10))


if __name__ == "__main__":
  googletest.main()
ParseInterval
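A small sketch of the parsed-interval API the tests above rely on, assuming the module is importable as in the test:

from tensorflow.python.debug.cli import command_parser

# "[10us, 1ms]" parses to an Interval over microseconds with both
# endpoints included.
interval = command_parser.parse_time_interval("[10us, 1ms]")
print(interval.contains(500))  # True: 500us lies inside [10, 1000]
print(interval.contains(5))    # False: below the inclusive lower bound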
python
keras-team__keras
keras/src/layers/preprocessing/data_layer_test.py
{ "start": 1223, "end": 3169 }
class ____(testing.TestCase):
    @pytest.mark.requires_trainable_backend
    def test_layer(self):
        self.run_layer_test(
            RandomRGBToHSVLayer,
            init_kwargs={
                "seed": 1337,
                "data_format": "channels_last",
            },
            input_shape=(1, 2, 2, 3),
            supports_masking=False,
            expected_output_shape=(1, 2, 2, 3),
        )
        self.run_layer_test(
            RandomRGBToHSVLayer,
            init_kwargs={
                "seed": 1337,
                "data_format": "channels_first",
            },
            input_shape=(1, 3, 2, 2),
            supports_masking=False,
            expected_output_shape=(1, 3, 2, 2),
        )

    def test_tf_data_compatibility(self):
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3)).astype("float32")
        else:
            input_data = np.random.random((2, 3, 8, 8)).astype("float32")
        layer = RandomRGBToHSVLayer(data_format=data_format, seed=1337)

        ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            self.assertDType(output, "float32")
            self.assertEqual(list(output.shape), list(input_data.shape))

    def test_grain_compatibility(self):
        data_format = backend.config.image_data_format()
        if data_format == "channels_last":
            input_data = np.random.random((2, 8, 8, 3)).astype("float32")
        else:
            input_data = np.random.random((2, 3, 8, 8)).astype("float32")
        layer = RandomRGBToHSVLayer(data_format=data_format, seed=1337)

        ds = grain.MapDataset.source(input_data).batch(2).map(layer)
        for output in ds[:1]:
            self.assertDType(output, "float32")
            self.assertEqual(list(output.shape), list(input_data.shape))
DataLayerTest
python
networkx__networkx
networkx/readwrite/tests/test_graph6.py
{ "start": 646, "end": 980 }
class ____:
    def test_n_data_n_conversion(self):
        for i in [0, 1, 42, 62, 63, 64, 258047, 258048, 7744773, 68719476735]:
            assert g6.data_to_n(g6.n_to_data(i))[0] == i
            assert g6.data_to_n(g6.n_to_data(i))[1] == []
            assert g6.data_to_n(g6.n_to_data(i) + [42, 43])[1] == [42, 43]
TestGraph6Utils
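For orientation, a brief sketch of the round trip the test above exercises: `n_to_data` encodes an integer node count into graph6 size bytes, and `data_to_n` inverts it, returning any trailing data untouched (the module alias `g6` mirrors the test's import; treat the exact import path as an assumption):

import networkx.readwrite.graph6 as g6

# Encode a node count, then decode it back with extra payload appended.
data = g6.n_to_data(258048)
n, rest = g6.data_to_n(data + [42, 43])
assert n == 258048 and rest == [42, 43]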
python
wntrblm__nox
nox/_decorators.py
{ "start": 1896, "end": 4684 }
class ____(FunctionDecorator):
    """This is a function decorator that adds additional Nox-specific metadata."""

    def __init__(
        self,
        func: Callable[..., Any],
        python: _typing.Python = None,
        reuse_venv: bool | None = None,  # noqa: FBT001
        name: str | None = None,
        venv_backend: str | None = None,
        venv_params: Sequence[str] = (),
        should_warn: Mapping[str, Any] | None = None,
        tags: Sequence[str] | None = None,
        *,
        default: bool = True,
        requires: Sequence[str] | None = None,
        download_python: Literal["auto", "never", "always"] | None = None,
    ) -> None:
        self.func = func
        self.python = python
        self.reuse_venv = reuse_venv
        self.name = name
        self.venv_backend = venv_backend
        self.venv_params = venv_params
        self.should_warn = dict(should_warn or {})
        self.tags = list(tags or [])
        self.default = default
        self.requires = list(requires or [])
        self.download_python = download_python

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(name={self.name!r})"

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        return self.func(*args, **kwargs)

    def copy(self, name: str | None = None) -> Func:
        """Copy this function with a new name."""
        return Func(
            _copy_func(self.func, name),
            self.python,
            self.reuse_venv,
            name,
            self.venv_backend,
            self.venv_params,
            self.should_warn,
            self.tags,
            default=self.default,
            requires=self._requires,
            download_python=self.download_python,
        )

    @property
    def requires(self) -> list[str]:
        # Compute dynamically on lookup since ``self.python`` can be modified after
        # creation (e.g. on an instance from ``self.copy``).
        return list(map(self.format_dependency, self._requires))

    @requires.setter
    def requires(self, value: Sequence[str]) -> None:
        self._requires = list(value)

    def format_dependency(self, dependency: str) -> str:
        if isinstance(self.python, (bool, str)) or self.python is None:
            formatted = dependency.format(python=self.python, py=self.python)
            if (
                self.python is None or isinstance(self.python, bool)
            ) and formatted != dependency:
                msg = "Cannot parametrize requires with {python} when python is None or a bool."
                raise ValueError(msg)
            return formatted

        msg = "The requires of a not-yet-parametrized session cannot be parametrized."  # pragma: no cover
        raise TypeError(msg)  # pragma: no cover
Func
python
getsentry__sentry
src/sentry/workflow_engine/endpoints/validators/base/data_condition_group.py
{ "start": 431, "end": 3061 }
class ____(CamelSnakeSerializer):
    id = serializers.CharField(required=False)
    logic_type = serializers.ChoiceField([(t.value, t.value) for t in DataConditionGroup.Type])
    conditions = serializers.ListField(required=False)

    def validate_conditions(self, value: list[dict[str, Any]]) -> list[dict[str, Any]]:
        conditions = []
        for condition in value:
            condition_validator = BaseDataConditionValidator(data=condition)
            condition_validator.is_valid(raise_exception=True)
            conditions.append(condition_validator.validated_data)
        return conditions

    def update_or_create_condition(self, condition_data: dict[str, Any]) -> DataCondition:
        validator = BaseDataConditionValidator()
        condition_id = condition_data.get("id")

        if condition_id:
            try:
                condition = DataCondition.objects.get(id=condition_id)
            except DataConditionGroup.DoesNotExist:
                raise serializers.ValidationError(f"Condition with id {condition_id} not found.")
            condition = validator.update(condition, condition_data)
        else:
            condition = validator.create(condition_data)

        return condition

    def update(
        self,
        instance: DataConditionGroup,
        validated_data: dict[str, Any],
    ) -> DataConditionGroup:
        remove_items_by_api_input(validated_data.get("conditions", []), instance.conditions, "id")

        conditions = validated_data.pop("conditions", None)
        if conditions:
            for condition_data in conditions:
                if not condition_data.get("condition_group_id"):
                    condition_data["condition_group_id"] = instance.id
                self.update_or_create_condition(condition_data)

        # update the condition group
        instance.update(**validated_data)
        return instance

    def create(self, validated_data: dict[str, Any]) -> DataConditionGroup:
        with transaction.atomic(router.db_for_write(DataConditionGroup)):
            condition_group = DataConditionGroup.objects.create(
                logic_type=validated_data["logic_type"],
                organization_id=self.context["organization"].id,
            )

            for condition in validated_data["conditions"]:
                if not condition.get("condition_group_id"):
                    condition["condition_group_id"] = condition_group.id
                condition_validator = BaseDataConditionValidator()
                condition_validator.create(condition)

            return condition_group
BaseDataConditionGroupValidator
python
tensorflow__tensorflow
tensorflow/python/data/kernel_tests/range_test.py
{ "start": 6519, "end": 7351 }
class ____(checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):

  def _build_range_dataset(self, start, stop, options=None):
    dataset = dataset_ops.Dataset.range(start, stop)
    if options:
      dataset = dataset.with_options(options)
    return dataset

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          checkpoint_test_base.default_test_combinations(),
          combinations.combine(symbolic_checkpoint=[False, True])))
  def test(self, verify_fn, symbolic_checkpoint):
    start = 2
    stop = 10
    options = options_lib.Options()
    options.experimental_symbolic_checkpoint = symbolic_checkpoint
    verify_fn(self, lambda: self._build_range_dataset(start, stop, options),
              stop - start)
RangeCheckpointTest
python
astropy__astropy
astropy/modeling/projections.py
{ "start": 13135, "end": 13437 }
class ____(Sky2PixProjection, Zenithal):
    r"""
    Gnomonic Projection - sky to pixel.

    Corresponds to the ``TAN`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    .. math::
        R_\theta = \frac{180^{\circ}}{\pi}\cot \theta
    """
Sky2Pix_Gnomonic
python
coleifer__peewee
tests/regressions.py
{ "start": 24557, "end": 24695 }
class ____(TestModel):
    id = CharField()
    color = CharField()

    class Meta:
        primary_key = CompositeKey('id', 'color')
Product
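A usage sketch for the composite key above (in-memory SQLite; `TestModel` is the suite's base class, so a plain peewee `Model` stands in here as an assumption):

from peewee import SqliteDatabase, Model, CharField, CompositeKey

db = SqliteDatabase(":memory:")

class Product(Model):
    id = CharField()
    color = CharField()

    class Meta:
        database = db
        primary_key = CompositeKey('id', 'color')

db.create_tables([Product])
Product.create(id='p1', color='red')
Product.create(id='p1', color='blue')  # same id, different color is allowed

# Lookups must constrain both columns of the composite primary key.
row = Product.get((Product.id == 'p1') & (Product.color == 'red'))
assert row.color == 'red'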
python
great-expectations__great_expectations
great_expectations/self_check/sqlalchemy_connection_manager.py
{ "start": 318, "end": 1295 }
class ____:
    def __init__(self) -> None:
        self.lock = threading.Lock()
        self._connections: Dict[str, sqlalchemy.Connection] = {}

    def get_connection(self, connection_string):
        if sa is not None:
            with self.lock:
                if connection_string not in self._connections:
                    try:
                        engine = sa.create_engine(connection_string)
                        conn = engine.connect()
                        self._connections[connection_string] = conn
                    except (ImportError, SQLAlchemyError) as e:
                        print(
                            f'Unable to establish connection with {connection_string} -- exception "{e}" occurred.'  # noqa: E501 # FIXME CoP
                        )
                        raise
                return self._connections[connection_string]
        return None


connection_manager = SqlAlchemyConnectionManager()
SqlAlchemyConnectionManager
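A short sketch of the caching behavior above (assumes SQLAlchemy is installed; `sqlite://` is an in-memory database):

manager = SqlAlchemyConnectionManager()
conn = manager.get_connection("sqlite://")
# A second call with the same connection string returns the cached connection.
assert manager.get_connection("sqlite://") is conn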
python
mlflow__mlflow
dev/clint/tests/rules/test_implicit_optional.py
{ "start": 273, "end": 337 }
class ____:
    x: str = None


# Good
good: Optional[int] = None
Bad
python
ray-project__ray
python/ray/tune/search/sample.py
{ "start": 4294, "end": 4545 }
class ____:
    def sample(
        self,
        domain: Domain,
        config: Optional[Union[List[Dict], Dict]] = None,
        size: int = 1,
        random_state: "RandomState" = None,
    ):
        raise NotImplementedError


@DeveloperAPI
Sampler
python
doocs__leetcode
solution/2400-2499/2404.Most Frequent Even Element/Solution.py
{ "start": 0, "end": 280 }
class ____:
    def mostFrequentEven(self, nums: List[int]) -> int:
        cnt = Counter(x for x in nums if x % 2 == 0)
        ans, mx = -1, 0
        for x, v in cnt.items():
            if v > mx or (v == mx and ans > x):
                ans, mx = x, v
        return ans
Solution
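A quick check of the tie-breaking rule above: on equal counts the smaller even value wins, and -1 is returned when no even element exists (the driver below is illustrative, not part of the source):

from collections import Counter
from typing import List

# Solution as defined in the record above.
class Solution:
    def mostFrequentEven(self, nums: List[int]) -> int:
        cnt = Counter(x for x in nums if x % 2 == 0)
        ans, mx = -1, 0
        for x, v in cnt.items():
            if v > mx or (v == mx and ans > x):
                ans, mx = x, v
        return ans

assert Solution().mostFrequentEven([0, 1, 2, 2, 4, 4]) == 2  # tie between 2 and 4
assert Solution().mostFrequentEven([1, 3, 5]) == -1          # no even element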
python
apache__airflow
devel-common/src/tests_common/test_utils/mock_operators.py
{ "start": 3592, "end": 4520 }
class ____(BaseOperatorLink):
    """Custom Operator with Link for Google Custom Search."""

    name = "Google Custom"

    def get_link(self, operator, *, ti_key):
        if AIRFLOW_V_3_0_PLUS:
            search_query = XCom.get_many(
                task_ids=ti_key.task_id,
                dag_ids=ti_key.dag_id,
                run_id=ti_key.run_id,
                map_indexes=ti_key.map_index,
                key="search_query",
            ).first()
            search_query = XCom.deserialize_value(search_query)
        else:
            search_query = XCom.get_one(
                task_id=ti_key.task_id,
                dag_id=ti_key.dag_id,
                run_id=ti_key.run_id,
                map_index=ti_key.map_index,
                key="search_query",
            )
        if not search_query:
            return None
        return f"http://google.com/custom_base_link?search={search_query}"
CustomOpLink
python
uqfoundation__dill
dill/tests/test_nested.py
{ "start": 741, "end": 977 }
class ____(object):
    def __init__(self, augend):
        self.augend = augend
        self.zero = [0]

    def __call__(self, addend):
        return addend + self.augend + self.zero[0]

# rewrite again, but as an old-style class
cadder
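The callable above keeps its state on the instance rather than in a closure, which is what makes it serializable; a minimal behavioral sketch (the dill round trip shown is an assumption about what the surrounding test suite exercises):

import dill

# cadder as defined in the record above.
add_five = cadder(5)
assert add_five(3) == 8  # 3 + 5 + self.zero[0]

# Instances survive serialization because state lives on the object.
clone = dill.loads(dill.dumps(add_five))
assert clone(3) == 8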
python
PyCQA__pylint
tests/functional/p/postponed/postponed_evaluation_pep585.py
{ "start": 896, "end": 1128 }
class ____(typing.NamedTuple):
    my_var: list[int]


# Check typing.TypedDict
CustomTypedDict = TypedDict("CustomTypedDict", my_var=list[int])

CustomTypedDict2 = TypedDict("CustomTypedDict2", {"my_var": list[int]})
CustomNamedTuple3
python
dagster-io__dagster
python_modules/dagster/dagster/_core/execution/step_dependency_config.py
{ "start": 427, "end": 924 }
class ____:
    require_upstream_step_success: bool

    @staticmethod
    def default() -> "StepDependencyConfig":
        return StepDependencyConfig(require_upstream_step_success=True)

    @staticmethod
    def from_config(config_value: Optional[dict[str, bool]]) -> "StepDependencyConfig":
        return StepDependencyConfig(
            require_upstream_step_success=config_value["require_upstream_step_success"]
            if config_value
            else True,
        )
StepDependencyConfig
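A small illustration of the `from_config` defaulting above; a frozen dataclass stands in for Dagster's `@record` decorator (an assumption for this sketch):

from dataclasses import dataclass
from typing import Optional

@dataclass(frozen=True)
class StepDependencyConfig:
    require_upstream_step_success: bool

    @staticmethod
    def from_config(config_value: Optional[dict]) -> "StepDependencyConfig":
        # Missing config falls back to requiring upstream success.
        return StepDependencyConfig(
            require_upstream_step_success=config_value["require_upstream_step_success"]
            if config_value
            else True,
        )

assert StepDependencyConfig.from_config(None).require_upstream_step_success
assert not StepDependencyConfig.from_config(
    {"require_upstream_step_success": False}
).require_upstream_step_success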
python
tensorflow__tensorflow
tensorflow/python/training/saver_test.py
{ "start": 116068, "end": 116166 }
class ____(CheckpointReaderTest):

  _WRITE_VERSION = saver_pb2.SaverDef.V2
CheckpointReaderForV2Test
python
ansible__ansible
lib/ansible/cli/doc.py
{ "start": 3073, "end": 17321 }
class ____(object):
    """A mixin containing all methods relevant to role argument specification functionality.

    Note: The methods for actual display of role data are not present here.
    """

    # Potential locations of the role arg spec file in the meta subdir, with main.yml
    # having the lowest priority.
    ROLE_METADATA_FILES = ["main" + e for e in C.YAML_FILENAME_EXTENSIONS]
    ROLE_ARGSPEC_FILES = ['argument_specs' + e for e in C.YAML_FILENAME_EXTENSIONS] + ROLE_METADATA_FILES

    def _load_role_data(self, root, files, role_name, collection):
        """ Load and process the YAML for the first found of a set of role files

        :param str root: The root path to get the files from
        :param list files: List of candidate file names in order of precedence
        :param str role_name: The name of the role for which we want the argspec data.
        :param str collection: collection name or None in case of standalone roles

        :returns: A dict that contains the data requested, empty if no data found
        """
        if collection:
            meta_path = os.path.join(root, 'roles', role_name, 'meta')
        else:
            meta_path = os.path.join(root, 'meta')

        # Check all potential spec files
        path = None
        for specfile in files:
            full_path = os.path.join(meta_path, specfile)
            if os.path.exists(full_path):
                path = full_path
                break

        if path is None:
            return {}

        try:
            with open(path, 'r') as f:
                data = yaml.load(trust_as_template(f), Loader=AnsibleLoader)
                if data is None:
                    data = {}
        except OSError as ex:
            raise AnsibleParserError(f"Could not read the role {role_name!r} at {path!r}.") from ex

        return data

    def _load_metadata(self, role_name, role_path, collection):
        """Load the role's metadata from the source file.

        :param str role_name: The name of the role for which we want the argspec data.
        :param str role_path: Path to the role/collection root.
        :param str collection: collection name or None in case of standalone roles

        :returns: A dict of all role metadata, except ``argument_specs`` or an empty dict
        """
        data = self._load_role_data(role_path, self.ROLE_METADATA_FILES, role_name, collection)
        del data['argument_specs']

        return data

    def _load_argspec(self, role_name, role_path, collection):
        """Load the role argument spec data from the source file.

        :param str role_name: The name of the role for which we want the argspec data.
        :param str role_path: Path to the role/collection root.
        :param str collection: collection name or None in case of standalone roles

        We support two files containing the role arg spec data: either meta/main.yml
        or meta/argument_specs.yml. The argument_specs.yml file will take precedence
        over the meta/main.yml file, if it exists. Data is NOT combined between the two files.

        :returns: A dict of all data underneath the ``argument_specs`` top-level YAML
            key in the argspec data file. Empty dict is returned if there is no data.
        """
        try:
            data = self._load_role_data(role_path, self.ROLE_ARGSPEC_FILES, role_name, collection)
            data = data.get('argument_specs', {})
        except Exception as e:
            # we keep error info, but let caller deal with it
            data = {'error': 'Failed to process role (%s): %s' % (role_name, to_native(e)), 'exception': e}
        return data

    def _find_all_normal_roles(self, role_paths, name_filters=None):
        """Find all non-collection roles that have an argument spec file. Note that
        argument specs do not actually need to exist within the spec file.

        :param role_paths: A tuple of one or more role paths. When a role with the same
            name is found in multiple paths, only the first-found role is returned.
        :param name_filters: A tuple of one or more role names used to filter the results.

        :returns: A set of tuples consisting of: role name, full role path
        """
        found = set()
        found_names = set()

        for path in role_paths:
            if not os.path.isdir(path):
                continue

            # Check each subdir for an argument spec file
            for entry in os.listdir(path):
                role_path = os.path.join(path, entry)

                # Check all potential spec files
                for specfile in self.ROLE_ARGSPEC_FILES:
                    full_path = os.path.join(role_path, 'meta', specfile)
                    if os.path.exists(full_path):
                        if name_filters is None or entry in name_filters:
                            # select first-found role
                            if entry not in found_names:
                                found_names.add(entry)
                                # None here stands for 'collection', which standalone roles don't have;
                                # makes downstream code simpler by having same structure as collection roles
                                found.add((entry, None, role_path))
                        # only read first existing spec
                        break
        return found

    def _find_all_collection_roles(self, name_filters=None, collection_filter=None):
        """Find all collection roles with an argument spec file. Note that argument specs
        do not actually need to exist within the spec file.

        :param name_filters: A tuple of one or more role names used to filter the results. These
            might be fully qualified with the collection name (e.g., community.general.roleA)
            or not (e.g., roleA).
        :param collection_filter: A list of strings containing the FQCN of a collection which will
            be used to limit results. This filter will take precedence over the name_filters.

        :returns: A set of tuples consisting of: role name, collection name, collection path
        """
        found = set()
        b_colldirs = list_collection_dirs(coll_filter=collection_filter)
        for b_path in b_colldirs:
            path = to_text(b_path, errors='surrogate_or_strict')

            if not (collname := _get_collection_name_from_path(b_path)):
                display.debug(f'Skipping invalid path {b_path!r}')
                continue

            roles_dir = os.path.join(path, 'roles')
            if os.path.exists(roles_dir):
                for entry in os.listdir(roles_dir):

                    # Check all potential spec files
                    for specfile in self.ROLE_ARGSPEC_FILES:
                        full_path = os.path.join(roles_dir, entry, 'meta', specfile)
                        if os.path.exists(full_path):
                            if name_filters is None:
                                found.add((entry, collname, path))
                            else:
                                # Name filters might contain a collection FQCN or not.
                                for fqcn in name_filters:
                                    if len(fqcn.split('.')) == 3:
                                        (ns, col, role) = fqcn.split('.')
                                        if '.'.join([ns, col]) == collname and entry == role:
                                            found.add((entry, collname, path))
                                    elif fqcn == entry:
                                        found.add((entry, collname, path))
                            break
        return found

    def _build_summary(self, role, collection, meta, argspec):
        """Build a summary dict for a role.

        Returns a simplified role arg spec containing only the role entry points and their
        short descriptions, and the role collection name (if applicable).

        :param role: The simple role name.
        :param collection: The collection containing the role (None or empty string if N/A).
        :param meta: dictionary with galaxy information (None or empty string if N/A).
        :param argspec: The complete role argspec data dict.

        :returns: A tuple with the FQCN role name and a summary dict.
        """
        if meta and meta.get('galaxy_info'):
            summary = meta['galaxy_info']
        else:
            summary = {'description': 'UNDOCUMENTED'}
        summary['entry_points'] = {}
        if collection:
            fqcn = '.'.join([collection, role])
            summary['collection'] = collection
        else:
            fqcn = role
        for ep in argspec.keys():
            entry_spec = argspec[ep] or {}
            summary['entry_points'][ep] = entry_spec.get('short_description', '')
        return (fqcn, summary)

    def _build_doc(self, role, path, collection, argspec, entry_point):
        if collection:
            fqcn = '.'.join([collection, role])
        else:
            fqcn = role
        doc = {}
        doc['path'] = path
        doc['collection'] = collection
        if 'error' in argspec:
            doc.update(argspec)
        else:
            doc['entry_points'] = {}
            for ep in argspec.keys():
                if entry_point is None or ep == entry_point:
                    entry_spec = argspec[ep] or {}
                    doc['entry_points'][ep] = entry_spec

            # If we didn't add any entry points (b/c of filtering), ignore this entry.
            if len(doc['entry_points'].keys()) == 0:
                doc = None

        return (fqcn, doc)

    def _create_role_list(self, fail_on_errors=True):
        """Return a dict describing the listing of all roles with arg specs.

        :param role_paths: A tuple of one or more role paths.

        :returns: A dict indexed by role name, with 'collection' and 'entry_points' keys per role.

        Example return:

            results = {
               'roleA': {
                  'collection': '',
                  'entry_points': {
                     'main': 'Short description for main'
                  }
               },
               'a.b.c.roleB': {
                  'collection': 'a.b.c',
                  'entry_points': {
                     'main': 'Short description for main',
                     'alternate': 'Short description for alternate entry point'
                  }
               },
               'x.y.z.roleB': {
                  'collection': 'x.y.z',
                  'entry_points': {
                     'main': 'Short description for main',
                  }
               },
            }
        """
        roles_path = self._get_roles_path()
        collection_filter = self._get_collection_filter()
        if not collection_filter:
            roles = self._find_all_normal_roles(roles_path)
        else:
            roles = set()
        collroles = self._find_all_collection_roles(collection_filter=collection_filter)

        result = {}

        for role, collection, role_path in (roles | collroles):
            try:
                meta = self._load_metadata(role, role_path, collection)
            except Exception as e:
                display.vvv('No metadata for role (%s) due to: %s' % (role, to_native(e)), True)
                meta = {}

            argspec = self._load_argspec(role, role_path, collection)
            if 'error' in argspec:
                if fail_on_errors:
                    raise argspec['exception']
                else:
                    display.warning('Skipping role (%s) due to: %s' % (role, argspec['error']), True)
                    continue

            fqcn, summary = self._build_summary(role, collection, meta, argspec)
            result[fqcn] = summary

        return result

    def _create_role_doc(self, role_names, entry_point=None, fail_on_errors=True):
        """
        :param role_names: A tuple of one or more role names.
        :param role_paths: A tuple of one or more role paths.
        :param entry_point: A role entry point name for filtering.
        :param fail_on_errors: When set to False, include errors in the JSON output instead of raising errors

        :returns: A dict indexed by role name, with 'collection', 'entry_points', and 'path' keys per role.
        """
        roles_path = self._get_roles_path()
        roles = self._find_all_normal_roles(roles_path, name_filters=role_names)
        collroles = self._find_all_collection_roles(name_filters=role_names)

        result = {}

        for role, collection, role_path in (roles | collroles):
            argspec = self._load_argspec(role, role_path, collection)

            if 'error' in argspec:
                if fail_on_errors:
                    raise argspec['exception']
                else:
                    display.warning('Skipping role (%s) due to: %s' % (role, argspec['error']), True)
                    continue

            fqcn, doc = self._build_doc(role, role_path, collection, argspec, entry_point)
            if doc:
                result[fqcn] = doc

        return result


def _doclink(url):
    # assume that if it is relative, it is for docsite, ignore rest
    if not url.startswith(("http", "..")):
        url = get_versioned_doclink(url)
    return url


def _format(string, *args):
    """ add ascii formatting or delimiters """

    for style in args:
        if style not in ref_style and style.upper() not in STYLE and style not in C.COLOR_CODES:
            raise KeyError("Invalid format value supplied: %s" % style)

        if C.ANSIBLE_NOCOLOR:
            # ignore most styles, but some already had 'identifier strings'
            if style in NOCOLOR:
                string = NOCOLOR[style] % string
        elif style in C.COLOR_CODES:
            string = stringc(string, style)
        elif style in ref_style:
            # assumes refs are also always colors
            string = stringc(string, ref_style[style])
        else:
            # start specific style and 'end' with normal
            string = '%s%s%s' % (STYLE[style.upper()], string, STYLE['NORMAL'])

    return string
RoleMixin
python
getsentry__sentry
tests/sentry/api/endpoints/test_project_servicehook_details.py
{ "start": 111, "end": 650 }
class ____(APITestCase):
    def test_simple(self) -> None:
        project = self.create_project()
        hook = ServiceHook.objects.get_or_create(
            project_id=project.id, actor_id=self.user.id, url="http://example.com"
        )[0]

        self.login_as(user=self.user)

        path = f"/api/0/projects/{project.organization.slug}/{project.slug}/hooks/{hook.guid}/"

        response = self.client.get(path)
        assert response.status_code == 200
        assert response.data["id"] == hook.guid
ProjectServiceHookDetailsTest
python
huggingface__transformers
src/transformers/models/speecht5/modeling_speecht5.py
{ "start": 20786, "end": 21441 }
class ____(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
        self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
        self.dropout = nn.Dropout(config.feat_proj_dropout)

    def forward(self, hidden_states):
        # non-projected hidden states are needed for quantization
        norm_hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.projection(norm_hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states, norm_hidden_states
SpeechT5FeatureProjection
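A shape-level sketch of the projection above, with a minimal stand-in config (the attribute values are illustrative assumptions, not SpeechT5 defaults):

import torch
from types import SimpleNamespace

config = SimpleNamespace(
    conv_dim=[512], hidden_size=768, layer_norm_eps=1e-5, feat_proj_dropout=0.0
)
# Module as defined in the record above: projects the last conv dim to hidden_size
# and also returns the pre-projection, normalized states.
proj = SpeechT5FeatureProjection(config)
hidden, norm_hidden = proj(torch.randn(2, 50, 512))
assert hidden.shape == (2, 50, 768) and norm_hidden.shape == (2, 50, 512)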
python
faif__python-patterns
patterns/structural/mvc.py
{ "start": 1425, "end": 1976 }
class ____(ABC):
    """The View is the presentation layer of the application."""

    @abstractmethod
    def show_item_list(self, item_type: str, item_list: list) -> None:
        pass

    @abstractmethod
    def show_item_information(
        self, item_type: str, item_name: str, item_info: dict
    ) -> None:
        """Will look for item information by iterating over key,value pairs
        yielded by item_info.items()"""
        pass

    @abstractmethod
    def item_not_found(self, item_type: str, item_name: str) -> None:
        pass
View
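A concrete implementation sketch of the ABC above (the `ConsoleView` name is hypothetical, not from the source):

# View as defined in the record above.
class ConsoleView(View):
    def show_item_list(self, item_type: str, item_list: list) -> None:
        print(f"{item_type}s: {', '.join(item_list)}")

    def show_item_information(
        self, item_type: str, item_name: str, item_info: dict
    ) -> None:
        print(f"{item_type} '{item_name}':")
        for key, value in item_info.items():
            print(f"  {key}: {value}")

    def item_not_found(self, item_type: str, item_name: str) -> None:
        print(f"No {item_type} named '{item_name}' was found.")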
python
django__django
tests/validation/test_error_messages.py
{ "start": 113, "end": 4379 }
class ____(TestCase):
    def _test_validation_messages(self, field, value, expected):
        with self.assertRaises(ValidationError) as cm:
            field.clean(value, None)
        self.assertEqual(cm.exception.messages, expected)

    def test_autofield_field_raises_error_message(self):
        f = models.AutoField(primary_key=True)
        self._test_validation_messages(f, "fõo", ["“fõo” value must be an integer."])

    def test_integer_field_raises_error_message(self):
        f = models.IntegerField()
        self._test_validation_messages(f, "fõo", ["“fõo” value must be an integer."])

    def test_boolean_field_raises_error_message(self):
        f = models.BooleanField()
        self._test_validation_messages(
            f, "fõo", ["“fõo” value must be either True or False."]
        )

    def test_nullable_boolean_field_raises_error_message(self):
        f = models.BooleanField(null=True)
        self._test_validation_messages(
            f, "fõo", ["“fõo” value must be either True, False, or None."]
        )

    def test_float_field_raises_error_message(self):
        f = models.FloatField()
        self._test_validation_messages(f, "fõo", ["“fõo” value must be a float."])

    def test_decimal_field_raises_error_message(self):
        f = models.DecimalField()
        self._test_validation_messages(
            f, "fõo", ["“fõo” value must be a decimal number."]
        )

    def test_null_boolean_field_raises_error_message(self):
        f = models.BooleanField(null=True)
        self._test_validation_messages(
            f, "fõo", ["“fõo” value must be either True, False, or None."]
        )

    def test_date_field_raises_error_message(self):
        f = models.DateField()
        self._test_validation_messages(
            f,
            "fõo",
            [
                "“fõo” value has an invalid date format. It must be in YYYY-MM-DD "
                "format."
            ],
        )
        self._test_validation_messages(
            f,
            "aaaa-10-10",
            [
                "“aaaa-10-10” value has an invalid date format. It must be in "
                "YYYY-MM-DD format."
            ],
        )
        self._test_validation_messages(
            f,
            "2011-13-10",
            [
                "“2011-13-10” value has the correct format (YYYY-MM-DD) but it is an "
                "invalid date."
            ],
        )
        self._test_validation_messages(
            f,
            "2011-10-32",
            [
                "“2011-10-32” value has the correct format (YYYY-MM-DD) but it is an "
                "invalid date."
            ],
        )

    def test_datetime_field_raises_error_message(self):
        f = models.DateTimeField()
        # Wrong format
        self._test_validation_messages(
            f,
            "fõo",
            [
                "“fõo” value has an invalid format. It must be in "
                "YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."
            ],
        )
        # Correct format but invalid date
        self._test_validation_messages(
            f,
            "2011-10-32",
            [
                "“2011-10-32” value has the correct format (YYYY-MM-DD) but it is an "
                "invalid date."
            ],
        )
        # Correct format but invalid date/time
        self._test_validation_messages(
            f,
            "2011-10-32 10:10",
            [
                "“2011-10-32 10:10” value has the correct format "
                "(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) but it is an invalid date/time."
            ],
        )

    def test_time_field_raises_error_message(self):
        f = models.TimeField()
        # Wrong format
        self._test_validation_messages(
            f,
            "fõo",
            [
                "“fõo” value has an invalid format. It must be in HH:MM[:ss[.uuuuuu]] "
                "format."
            ],
        )
        # Correct format but invalid time
        self._test_validation_messages(
            f,
            "25:50",
            [
                "“25:50” value has the correct format (HH:MM[:ss[.uuuuuu]]) but it is "
                "an invalid time."
            ],
        )
ValidationMessagesTest
python
sympy__sympy
sympy/physics/quantum/tests/test_anticommutator.py
{ "start": 820, "end": 915 }
class ____(Operator):
    def _eval_anticommutator_Bar(self, bar):
        return Integer(0)
Foo
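The method name encodes double dispatch: when `.doit()` is called, sympy's `AntiCommutator` looks for `_eval_anticommutator_<ClassName>` on the first argument, named after the class of the second. A sketch under that assumption (the `Bar` stub is not shown in the record):

from sympy import Integer
from sympy.physics.quantum import AntiCommutator
from sympy.physics.quantum.operator import Operator

class Bar(Operator):
    pass

# doit() dispatches to Foo._eval_anticommutator_Bar, yielding 0.
assert AntiCommutator(Foo('f'), Bar('b')).doit() == Integer(0)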
python
dagster-io__dagster
python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/scaffold/branch/models.py
{ "start": 1466, "end": 1692 }
class ____:
    """Record of context gathering operations."""

    correlation_id: str
    timestamp: str
    files_analyzed: list[str]
    patterns_detected: list[str]
    decisions_made: dict[str, Any]


@record
ContextGathering
python
apache__airflow
airflow-core/src/airflow/lineage/hook.py
{ "start": 12505, "end": 13310 }
class ____(LoggingMixin):
    """Class used to retrieve the hook lineage information collected by HookLineageCollector."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.lineage_collector = get_hook_lineage_collector()

    def retrieve_hook_lineage(self) -> HookLineage:
        """Retrieve hook lineage from HookLineageCollector."""
        hook_lineage = self.lineage_collector.collected_assets
        return hook_lineage


@cache
def get_hook_lineage_collector() -> HookLineageCollector:
    """Get singleton lineage collector."""
    from airflow import plugins_manager

    plugins_manager.initialize_hook_lineage_readers_plugins()
    if plugins_manager.hook_lineage_reader_classes:
        return HookLineageCollector()
    return NoOpCollector()
HookLineageReader
python
pandas-dev__pandas
doc/source/conf.py
{ "start": 20276, "end": 29453 }
class ____(Autosummary):
    """
    This alternative autosummary class lets us override the table summary for
    Series.plot and DataFrame.plot in the API docs.
    """

    def _replace_pandas_items(self, display_name, sig, summary, real_name):
        # this is a hack: ideally we should extract the signature from the
        # .__call__ method instead of hard coding this
        if display_name == "DataFrame.plot":
            sig = "([x, y, kind, ax, ....])"
            summary = "DataFrame plotting accessor and method"
        elif display_name == "Series.plot":
            sig = "([kind, ax, figsize, ....])"
            summary = "Series plotting accessor and method"
        return (display_name, sig, summary, real_name)

    @staticmethod
    def _is_deprecated(real_name):
        try:
            obj, parent, modname = _import_by_name(real_name)
        except ImportError:
            return False
        doc = NumpyDocString(obj.__doc__ or "")
        summary = "".join(doc["Summary"] + doc["Extended Summary"])
        return ".. deprecated::" in summary

    def _add_deprecation_prefixes(self, items):
        for item in items:
            display_name, sig, summary, real_name = item
            if self._is_deprecated(real_name):
                summary = f"(DEPRECATED) {summary}"
            yield display_name, sig, summary, real_name

    def get_items(self, names):
        items = Autosummary.get_items(self, names)
        items = [self._replace_pandas_items(*item) for item in items]
        items = list(self._add_deprecation_prefixes(items))
        return items


# based on numpy doc/source/conf.py
def linkcode_resolve(domain, info) -> str | None:
    """
    Determine the URL corresponding to Python object
    """
    if domain != "py":
        return None

    modname = info["module"]
    fullname = info["fullname"]

    submod = sys.modules.get(modname)
    if submod is None:
        return None

    obj = submod
    for part in fullname.split("."):
        try:
            with warnings.catch_warnings():
                # Accessing deprecated objects will generate noisy warnings
                warnings.simplefilter("ignore", FutureWarning)
                obj = getattr(obj, part)
        except AttributeError:
            return None

    try:
        fn = inspect.getsourcefile(inspect.unwrap(obj))
    except TypeError:
        try:  # property
            fn = inspect.getsourcefile(inspect.unwrap(obj.fget))
        except (AttributeError, TypeError):
            fn = None
    if not fn:
        return None

    try:
        source, lineno = inspect.getsourcelines(obj)
    except TypeError:
        try:  # property
            source, lineno = inspect.getsourcelines(obj.fget)
        except (AttributeError, TypeError):
            lineno = None
    except OSError:
        lineno = None

    if lineno:
        linespec = f"#L{lineno}-L{lineno + len(source) - 1}"
    else:
        linespec = ""

    fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__))

    if "+" in pandas.__version__:
        return f"https://github.com/pandas-dev/pandas/blob/main/pandas/{fn}{linespec}"
    else:
        return (
            f"https://github.com/pandas-dev/pandas/blob/"
            f"v{pandas.__version__}/pandas/{fn}{linespec}"
        )


# remove the docstring of the flags attribute (inherited from numpy ndarray)
# because these give doc build errors (see GH issue 5331)
def remove_flags_docstring(app, what, name, obj, options, lines) -> None:
    if what == "attribute" and name.endswith(".flags"):
        del lines[:]


def process_class_docstrings(app, what, name, obj, options, lines) -> None:
    """
    For those classes for which we use ::

        :template: autosummary/class_without_autosummary.rst

    the documented attributes/methods have to be listed in the class
    docstring. However, if one of those lists is empty, we use 'None',
    which then generates warnings in sphinx / ugly html output.
    This "autodoc-process-docstring" event connector removes that part
    from the processed docstring.
    """
    if what == "class":
        joined = "\n".join(lines)

        templates = [
            """.. rubric:: Attributes

.. autosummary::
   :toctree:

   None
""",
            """.. rubric:: Methods

.. autosummary::
   :toctree:

   None
""",
        ]

        for template in templates:
            if template in joined:
                joined = joined.replace(template, "")
        lines[:] = joined.split("\n")


_BUSINED_ALIASES = [
    "pandas.tseries.offsets." + name
    for name in [
        "BDay",
        "CDay",
        "BMonthEnd",
        "BMonthBegin",
        "CBMonthEnd",
        "CBMonthBegin",
    ]
]


def process_business_alias_docstrings(app, what, name, obj, options, lines) -> None:
    """
    Starting with sphinx 3.4, the "autodoc-process-docstring" event also gets
    called for alias classes. This results in numpydoc adding the
    methods/attributes to the docstring, which we don't want (+ this causes
    warnings with sphinx).
    """
    if name in _BUSINED_ALIASES:
        lines[:] = []


suppress_warnings = [
    # We "overwrite" autosummary with our PandasAutosummary, but
    # still want the regular autosummary setup to run. So we just
    # suppress this warning.
    "app.add_directive"
]
if pattern:
    # When building a single document we don't want to warn because references
    # to other documents are unknown, as it's expected
    suppress_warnings.append("ref.ref")


def rstjinja(app, docname, source) -> None:
    """
    Render our pages as a jinja template for fancy templating goodness.
    """
    # https://www.ericholscher.com/blog/2016/jul/25/integrating-jinja-rst-sphinx/
    # Make sure we're outputting HTML
    if app.builder.format != "html":
        return
    src = source[0]
    rendered = app.builder.templates.render_string(src, app.config.html_context)
    source[0] = rendered


def setup(app) -> None:
    app.connect("source-read", rstjinja)
    app.connect("autodoc-process-docstring", remove_flags_docstring)
    app.connect("autodoc-process-docstring", process_class_docstrings)
    app.connect("autodoc-process-docstring", process_business_alias_docstrings)
    app.add_autodocumenter(AccessorDocumenter)
    app.add_autodocumenter(AccessorAttributeDocumenter)
    app.add_autodocumenter(AccessorMethodDocumenter)
    app.add_autodocumenter(AccessorCallableDocumenter)
    app.add_directive("autosummary", PandasAutosummary)


# Ignore list for broken links, found in CI run checks for broken-linkcheck.yml
linkcheck_ignore = [
    "^http://$",
    "^https://$",
    *[
        re.escape(link)
        for link in [
            "http://scatterci.github.io/pydata/pandas",
            "http://specs.frictionlessdata.io/json-table-schema/",
            "https://crates.io/crates/calamine",
            "https://devguide.python.org/setup/#macos",
            "https://en.wikipedia.org/wiki/Imputation_statistics",
            "https://en.wikipedia.org/wiki/Imputation_(statistics",
            "https://github.com/noatamir/pandas-dev",
            "https://github.com/pandas-dev/pandas/blob/main/pandas/plotting/__init__.py#L1",
            "https://github.com/pandas-dev/pandas/blob/v0.20.2/pandas/core/generic.py#L568",
            "https://github.com/pandas-dev/pandas/blob/v0.20.2/pandas/core/frame.py#L1495",
            "https://github.com/pandas-dev/pandas/issues/174151",
            "https://manishamde.github.io/blog/2013/03/07/pandas-and-python-top-10/",
            "https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.table",
            "https://nipunbatra.github.io/blog/visualisation/2013/05/01/aggregation-timeseries.html",
            "https://nbviewer.ipython.org/gist/metakermit/5720498",
            "https://numpy.org/doc/stable/user/basics.byteswapping.html",
            "https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking",
            "https://pandas.pydata.org/pandas-docs/stable/ecosystem.html",
            "https://sqlalchemy.readthedocs.io/en/latest/dialects/index.html",
            "https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a000245912.htm",
            "https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a000214639.htm",
            "https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a002283942.htm",
            "https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a000245965.htm",
            "https://support.sas.com/documentation/cdl/en/imlug/66845/HTML/default/viewer.htm#imlug_langref_sect455.htm",
            "https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a002284668.htm",
            "https://support.sas.com/documentation/cdl/en/lrdict/64316/HTML/default/viewer.htm#a002978282.htm",
            "https://wesmckinney.com/blog/update-on-upcoming-pandas-v0-10-new-file-parser-other-performance-wins/",
            "https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2022",
            "pandas.zip",
        ]
    ],
]
PandasAutosummary
python
langchain-ai__langchain
libs/core/langchain_core/callbacks/base.py
{ "start": 6472, "end": 10890 }
class ____:
    """Mixin for callback manager."""

    def on_llm_start(
        self,
        serialized: dict[str, Any],
        prompts: list[str],
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when LLM starts running.

        !!! warning
            This method is called for non-chat models (regular LLMs). If you're
            implementing a handler for a chat model, you should use
            `on_chat_model_start` instead.

        Args:
            serialized: The serialized LLM.
            prompts: The prompts.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """

    def on_chat_model_start(
        self,
        serialized: dict[str, Any],
        messages: list[list[BaseMessage]],
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when a chat model starts running.

        !!! warning
            This method is called for chat models. If you're implementing a handler
            for a non-chat model, you should use `on_llm_start` instead.

        Args:
            serialized: The serialized chat model.
            messages: The messages.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """
        # NotImplementedError is thrown intentionally
        # Callback handler will fall back to on_llm_start if this exception is thrown
        msg = f"{self.__class__.__name__} does not implement `on_chat_model_start`"
        raise NotImplementedError(msg)

    def on_retriever_start(
        self,
        serialized: dict[str, Any],
        query: str,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when the Retriever starts running.

        Args:
            serialized: The serialized Retriever.
            query: The query.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """

    def on_chain_start(
        self,
        serialized: dict[str, Any],
        inputs: dict[str, Any],
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when a chain starts running.

        Args:
            serialized: The serialized chain.
            inputs: The inputs.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            **kwargs: Additional keyword arguments.
        """

    def on_tool_start(
        self,
        serialized: dict[str, Any],
        input_str: str,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        inputs: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run when the tool starts running.

        Args:
            serialized: The serialized tool.
            input_str: The input string.
            run_id: The run ID. This is the ID of the current run.
            parent_run_id: The parent run ID. This is the ID of the parent run.
            tags: The tags.
            metadata: The metadata.
            inputs: The inputs.
            **kwargs: Additional keyword arguments.
        """
CallbackManagerMixin
python
python__mypy
mypyc/test/test_optimizations.py
{ "start": 1870, "end": 2064 }
class ____(OptimizationSuite):
    files = ["opt-copy-propagation.test"]

    def do_optimizations(self, fn: FuncIR) -> None:
        do_copy_propagation(fn, CompilerOptions())
TestCopyPropagation
python
bokeh__bokeh
src/bokeh/models/expressions.py
{ "start": 7007, "end": 7341 }
class ____(ScalarExpression):
    """ Computes maximum value of a data source's column. """

    # explicit __init__ to support Init signatures
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

    field = Required(String)

    initial = Nullable(Float, default=-inf)


@abstract
Maximum
python
langchain-ai__langchain
libs/partners/fireworks/tests/integration_tests/test_standard.py
{ "start": 354, "end": 1013 }
class ____(ChatModelIntegrationTests):
    @property
    def chat_model_class(self) -> type[BaseChatModel]:
        return ChatFireworks

    @property
    def chat_model_params(self) -> dict:
        return {
            "model": "fireworks/kimi-k2-instruct-0905",
            "temperature": 0,
        }

    @pytest.mark.xfail(reason="Not yet implemented.")
    def test_tool_message_histories_list_content(
        self, model: BaseChatModel, my_adder_tool: BaseTool
    ) -> None:
        super().test_tool_message_histories_list_content(model, my_adder_tool)

    @property
    def supports_json_mode(self) -> bool:
        return True
TestFireworksStandard
python
keras-team__keras
keras/src/utils/torch_utils_test.py
{ "start": 1844, "end": 12048 }
class ____(testing.TestCase):
    @parameterized.parameters(
        {"use_batch_norm": False, "num_torch_layers": 1},
        {"use_batch_norm": True, "num_torch_layers": 1},
    )
    def test_basic_usage(self, use_batch_norm, num_torch_layers):
        model = Classifier(use_batch_norm, num_torch_layers)
        self.assertEqual(len(model.layers), 2)
        # Linear - Weights, bias, BN - beta, gamma
        torch_trainable_count = 0
        for i, layer in zip(range(num_torch_layers), model.torch_wrappers):
            layer_trainable_count = 2
            if use_batch_norm:
                layer_trainable_count += 2
            self.assertEqual(
                len(layer.trainable_weights), layer_trainable_count
            )
            torch_trainable_count += layer_trainable_count
        model(np.random.random((3, 2)))
        self.assertEqual(len(model.layers), 2 * num_torch_layers)
        self.assertEqual(
            len(model.trainable_weights), torch_trainable_count + 2
        )
        model.compile(optimizer="sgd", loss="mse")
        model.fit(np.random.random((3, 2)), np.random.random((3, 1)))

    @parameterized.named_parameters(
        (
            "explicit_torch_wrapper",
            Classifier,
            {"use_batch_norm": True, "num_torch_layers": 1},
        ),
        ("implicit_torch_wrapper", ClassifierWithNoSpecialCasing, {}),
    )
    def test_training_args(self, cls, kwargs):
        model = cls(**kwargs)
        model(np.random.random((3, 2)), training=False)  # Eager call to build
        ref_weights = model.get_weights()
        ref_running_mean = backend.convert_to_numpy(
            model.torch_wrappers[0].module[-1].running_mean
            if cls is Classifier
            else model.bn1.module.running_mean
        )

        # Test training=False doesn't affect model weights
        model(np.random.random((3, 2)), training=False)
        weights = model.get_weights()
        for w, ref_w in zip(weights, ref_weights):
            self.assertAllClose(w, ref_w)

        # Test training=None affects BN's stats
        model.set_weights(ref_weights)  # Restore previous weights
        model(np.random.random((3, 2)))
        running_mean = backend.convert_to_numpy(
            model.torch_wrappers[0].module[-1].running_mean
            if cls is Classifier
            else model.bn1.module.running_mean
        )
        self.assertNotAllClose(running_mean, ref_running_mean)

        # Test training=True affects BN's stats
        model.set_weights(ref_weights)  # Restore previous weights
        model(np.random.random((3, 2)), training=True)
        running_mean = backend.convert_to_numpy(
            model.torch_wrappers[0].module[-1].running_mean
            if cls is Classifier
            else model.bn1.module.running_mean
        )
        self.assertNotAllClose(running_mean, ref_running_mean)

    def test_module_autowrapping(self):
        model = ClassifierWithNoSpecialCasing()
        self.assertIsInstance(model.fc1, TorchModuleWrapper)
        self.assertIsInstance(model.bn1, TorchModuleWrapper)
        self.assertIsInstance(model.fc2, TorchModuleWrapper)
        self.assertFalse(isinstance(model.fc3, TorchModuleWrapper))
        self.assertEqual(len(model.fc1.trainable_weights), 2)
        self.assertEqual(len(model.bn1.trainable_weights), 2)
        self.assertEqual(len(model.fc2.trainable_weights), 2)
        model(np.random.random((3, 2)))
        self.assertEqual(len(model.layers), 4)
        self.assertEqual(len(model.fc3.trainable_weights), 2)
        self.assertEqual(len(model.trainable_weights), 8)
        model.compile(optimizer="sgd", loss="mse")
        model.fit(np.random.random((3, 2)), np.random.random((3, 2)))

    def test_load_weights_autowrapping(self):
        # Test loading weights
        temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.weights.h5")
        model = ClassifierWithNoSpecialCasing()
        model.compile(optimizer="sgd", loss="mse")
        x, y = np.random.random((3, 2)), np.random.random((3, 1))
        x_test, y_test = np.random.random((3, 2)), np.random.random((3, 1))
        model.fit(x, y)
        ref_loss = model.evaluate(x_test, y_test)
        model.save_weights(temp_filepath)

        new_model = ClassifierWithNoSpecialCasing()
        new_model(np.random.random((3, 2)))
        new_model.compile(optimizer="sgd", loss="mse")
        new_model.load_weights(temp_filepath)
        for ref_w, new_w in zip(model.get_weights(), new_model.get_weights()):
            self.assertAllClose(ref_w, new_w, atol=1e-5)
        loss = new_model.evaluate(x_test, y_test)
        self.assertAllClose(ref_loss, loss, atol=1e-5)

    def test_serialize_model_autowrapping(self):
        # Test loading saved model
        temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras")
        model = ClassifierWithNoSpecialCasing()
        model.compile(optimizer="sgd", loss="mse")
        x, y = np.random.random((3, 2)), np.random.random((3, 1))
        x_test, y_test = np.random.random((3, 2)), np.random.random((3, 1))
        model.fit(x, y)
        ref_loss = model.evaluate(x_test, y_test)
        model.save(temp_filepath)

        new_model = saving.load_model(temp_filepath)
        for ref_w, new_w in zip(model.get_weights(), new_model.get_weights()):
            self.assertAllClose(ref_w, new_w, atol=1e-5)
        loss = new_model.evaluate(x_test, y_test)
        self.assertAllClose(ref_loss, loss, atol=1e-5)

    @parameterized.parameters(
        {"use_batch_norm": False, "num_torch_layers": 1},
        {"use_batch_norm": True, "num_torch_layers": 1},
        {"use_batch_norm": False, "num_torch_layers": 2},
        {"use_batch_norm": True, "num_torch_layers": 2},
    )
    def test_load_weights(self, use_batch_norm, num_torch_layers):
        # Test loading weights
        temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.weights.h5")
        model = Classifier(use_batch_norm, num_torch_layers)
        model.compile(optimizer="sgd", loss="mse")
        x, y = np.random.random((3, 2)), np.random.random((3, 1))
        x_test, y_test = np.random.random((3, 2)), np.random.random((3, 1))
        model.fit(x, y)
        ref_loss = model.evaluate(x_test, y_test)
        model.save_weights(temp_filepath)

        new_model = Classifier(use_batch_norm, num_torch_layers)
        new_model(np.random.random((3, 2)))
        new_model.compile(optimizer="sgd", loss="mse")
        new_model.load_weights(temp_filepath)
        for ref_w, new_w in zip(model.get_weights(), new_model.get_weights()):
            self.assertAllClose(ref_w, new_w, atol=1e-5)
        loss = new_model.evaluate(x_test, y_test)
        self.assertAllClose(ref_loss, loss, atol=1e-5)

    @parameterized.parameters(
        {"use_batch_norm": False, "num_torch_layers": 1},
        {"use_batch_norm": True, "num_torch_layers": 1},
        {"use_batch_norm": False, "num_torch_layers": 2},
        {"use_batch_norm": True, "num_torch_layers": 2},
    )
    def test_serialize_model(self, use_batch_norm, num_torch_layers):
        # Test loading saved model
        temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras")
        model = Classifier(use_batch_norm, num_torch_layers)
        model.compile(optimizer="sgd", loss="mse")
        x, y = np.random.random((3, 2)), np.random.random((3, 1))
        x_test, y_test = np.random.random((3, 2)), np.random.random((3, 1))
        model.fit(x, y)
        ref_loss = model.evaluate(x_test, y_test)
        model.save(temp_filepath)

        new_model = saving.load_model(temp_filepath)
        for ref_w, new_w in zip(model.get_weights(), new_model.get_weights()):
            self.assertAllClose(ref_w, new_w, atol=1e-5)
        loss = new_model.evaluate(x_test, y_test)
        self.assertAllClose(ref_loss, loss, atol=1e-5)

    def test_from_config(self):
        module = torch.nn.Sequential(torch.nn.Linear(2, 4))
        mw = TorchModuleWrapper(module)
        config = mw.get_config()
        new_mw = TorchModuleWrapper.from_config(config)
        for ref_w, new_w in zip(mw.get_weights(), new_mw.get_weights()):
            self.assertAllClose(ref_w, new_w, atol=1e-5)

    def test_build_model(self):
        x = keras.Input([4])
        z = TorchModuleWrapper(torch.nn.Linear(4, 8), output_shape=[None, 8])(x)
        y = TorchModuleWrapper(torch.nn.Linear(8, 16), output_shape=[None, 16])(
            z
        )
        model = keras.Model(x, y)
        self.assertEqual(model.predict(np.zeros([5, 4])).shape, (5, 16))
        self.assertEqual(model(np.zeros([5, 4])).shape, (5, 16))

    @parameterized.named_parameters(
        ("safe_mode", True),
        ("unsafe_mode", False),
    )
    def test_save_load(self, safe_mode):
        @keras.saving.register_keras_serializable()
        class M(keras.Model):
            def __init__(self, module, **kwargs):
                super().__init__(**kwargs)
                self.module = module

            def call(self, x):
                return self.module(x)

            def get_config(self):
                base_config = super().get_config()
                config = {"module": self.module}
                return {**base_config, **config}

            @classmethod
            def from_config(cls, config):
                config["module"] = saving.deserialize_keras_object(
                    config["module"]
                )
                return cls(**config)

        m = M(torch.nn.Conv2d(1, 10, kernel_size=(3, 3)))
        device = get_device()  # Get the current device (e.g., "cuda" or "cpu")
        x = torch.ones(
            (10, 1, 28, 28), device=device
        )  # Place input on the correct device
        ref_output = m(x)
        temp_filepath = os.path.join(self.get_temp_dir(), "mymodel.keras")
        m.save(temp_filepath)
        if safe_mode:
            with self.assertRaisesRegex(ValueError, "arbitrary code execution"):
                saving.load_model(temp_filepath, safe_mode=safe_mode)
        else:
            new_model = saving.load_model(temp_filepath, safe_mode=safe_mode)
            self.assertAllClose(new_model(x), ref_output)
TorchUtilsTest