language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
pallets__jinja
src/jinja2/nodes.py
{ "start": 20717, "end": 21975 }
class ____(Expr): """A conditional expression (inline if expression). (``{{ foo if bar else baz }}``) """ fields = ("test", "expr1", "expr2") test: Expr expr1: Expr expr2: Expr | None def as_const(self, eval_ctx: EvalContext | None = None) -> t.Any: eval_ctx = get_eval_context(self, eval_ctx) if self.test.as_const(eval_ctx): return self.expr1.as_const(eval_ctx) # if we evaluate to an undefined object, we better do that at runtime if self.expr2 is None: raise Impossible() return self.expr2.as_const(eval_ctx) def args_as_const( node: t.Union["_FilterTestCommon", "Call"], eval_ctx: EvalContext | None ) -> tuple[list[t.Any], dict[t.Any, t.Any]]: args = [x.as_const(eval_ctx) for x in node.args] kwargs = dict(x.as_const(eval_ctx) for x in node.kwargs) if node.dyn_args is not None: try: args.extend(node.dyn_args.as_const(eval_ctx)) except Exception as e: raise Impossible() from e if node.dyn_kwargs is not None: try: kwargs.update(node.dyn_kwargs.as_const(eval_ctx)) except Exception as e: raise Impossible() from e return args, kwargs
CondExpr
python
spack__spack
lib/spack/spack/vendor/six.py
{ "start": 14764, "end": 17668 }
class ____(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_request""" _urllib_request_moved_attributes = [ MovedAttribute("urlopen", "urllib2", "urllib.request"), MovedAttribute("install_opener", "urllib2", "urllib.request"), MovedAttribute("build_opener", "urllib2", "urllib.request"), MovedAttribute("pathname2url", "urllib", "urllib.request"), MovedAttribute("url2pathname", "urllib", "urllib.request"), MovedAttribute("getproxies", "urllib", "urllib.request"), MovedAttribute("Request", "urllib2", "urllib.request"), MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), MovedAttribute("BaseHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), MovedAttribute("FileHandler", "urllib2", "urllib.request"), MovedAttribute("FTPHandler", "urllib2", "urllib.request"), MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), MovedAttribute("urlretrieve", "urllib", "urllib.request"), 
MovedAttribute("urlcleanup", "urllib", "urllib.request"), MovedAttribute("URLopener", "urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("proxy_bypass", "urllib", "urllib.request"), MovedAttribute("parse_http_list", "urllib2", "urllib.request"), MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), ] for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) del attr Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes _importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), "moves.urllib_request", "moves.urllib.request")
Module_six_moves_urllib_request
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/partition_sets.py
{ "start": 16978, "end": 17178 }
class ____(graphene.Union): class Meta: types = (GraphenePartitionSets, GraphenePipelineNotFoundError, GraphenePythonError) name = "PartitionSetsOrError"
GraphenePartitionSetsOrError
python
spack__spack
lib/spack/spack/cmd/create.py
{ "start": 5978, "end": 6241 }
class ____(PackageTemplate): """Provides appropriate overrides for cargo-based packages""" base_class_name = "CargoPackage" package_class_import = "from spack_repo.builtin.build_systems.cargo import CargoPackage" body_def = ""
CargoPackageTemplate
python
networkx__networkx
networkx/generators/tests/test_lattice.py
{ "start": 7950, "end": 10102 }
class ____: "Tests for :func:`networkx.generators.lattice.hexagonal_lattice_graph`" def test_lattice_points(self): """Tests that the graph is really a hexagonal lattice.""" for m, n in [(4, 5), (4, 4), (4, 3), (3, 2), (3, 3), (3, 5)]: G = nx.hexagonal_lattice_graph(m, n) assert len(G) == 2 * (m + 1) * (n + 1) - 2 C_6 = nx.cycle_graph(6) hexagons = [ [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)], [(0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4)], [(1, 1), (1, 2), (1, 3), (2, 1), (2, 2), (2, 3)], [(2, 0), (2, 1), (2, 2), (3, 0), (3, 1), (3, 2)], [(2, 2), (2, 3), (2, 4), (3, 2), (3, 3), (3, 4)], ] for hexagon in hexagons: assert nx.is_isomorphic(G.subgraph(hexagon), C_6) def test_directed(self): """Tests for creating a directed hexagonal lattice.""" G = nx.hexagonal_lattice_graph(3, 5, create_using=nx.Graph()) H = nx.hexagonal_lattice_graph(3, 5, create_using=nx.DiGraph()) assert H.is_directed() pos = nx.get_node_attributes(H, "pos") for u, v in H.edges(): assert pos[v][1] >= pos[u][1] if pos[v][1] == pos[u][1]: assert pos[v][0] > pos[u][0] def test_multigraph(self): """Tests for creating a hexagonal lattice multigraph.""" G = nx.hexagonal_lattice_graph(3, 5, create_using=nx.Graph()) H = nx.hexagonal_lattice_graph(3, 5, create_using=nx.MultiGraph()) assert list(H.edges()) == list(G.edges()) def test_periodic(self): G = nx.hexagonal_lattice_graph(4, 6, periodic=True) assert len(G) == 48 assert G.size() == 72 # all degrees are 3 assert len([n for n, d in G.degree() if d != 3]) == 0 G = nx.hexagonal_lattice_graph(5, 8, periodic=True) HLG = nx.hexagonal_lattice_graph pytest.raises(nx.NetworkXError, HLG, 2, 7, periodic=True) pytest.raises(nx.NetworkXError, HLG, 1, 4, periodic=True) pytest.raises(nx.NetworkXError, HLG, 2, 1, periodic=True)
TestHexagonalLatticeGraph
python
tornadoweb__tornado
tornado/test/ioloop_test.py
{ "start": 26620, "end": 27988 }
class ____(unittest.TestCase): def run_python(self, *statements): stmt_list = [ "from tornado.ioloop import IOLoop", "classname = lambda x: x.__class__.__name__", ] + list(statements) args = [sys.executable, "-c", "; ".join(stmt_list)] return native_str(subprocess.check_output(args)).strip() def test_default(self): # When asyncio is available, it is used by default. cls = self.run_python("print(classname(IOLoop.current()))") self.assertEqual(cls, "AsyncIOMainLoop") cls = self.run_python("print(classname(IOLoop()))") self.assertEqual(cls, "AsyncIOLoop") def test_asyncio(self): cls = self.run_python( 'IOLoop.configure("tornado.platform.asyncio.AsyncIOLoop")', "print(classname(IOLoop.current()))", ) self.assertEqual(cls, "AsyncIOMainLoop") @unittest.skipIf( sys.version_info >= (3, 14), "implicit event loop creation not available" ) def test_asyncio_main(self): cls = self.run_python( "from tornado.platform.asyncio import AsyncIOMainLoop", "AsyncIOMainLoop().install()", "print(classname(IOLoop.current()))", ) self.assertEqual(cls, "AsyncIOMainLoop") if __name__ == "__main__": unittest.main()
TestIOLoopConfiguration
python
pallets__jinja
src/jinja2/exceptions.py
{ "start": 4885, "end": 5026 }
class ____(TemplateRuntimeError): """This error is raised if a filter was called with inappropriate arguments """
FilterArgumentError
python
huggingface__transformers
tests/models/rag/test_modeling_rag.py
{ "start": 40726, "end": 47611 }
class ____(unittest.TestCase): @classmethod def setUpClass(cls): cls.temp_dir = tempfile.TemporaryDirectory() cls.dataset_path = cls.temp_dir.name cls.index_path = os.path.join(cls.temp_dir.name, "index.faiss") ds = load_dataset("hf-internal-testing/wiki_dpr_dummy")["train"] ds.save_to_disk(cls.dataset_path) url = "https://huggingface.co/datasets/hf-internal-testing/wiki_dpr_dummy/resolve/main/index.faiss" response = requests.get(url, stream=True) with open(cls.index_path, "wb") as fp: fp.write(response.content) @classmethod def tearDownClass(cls): cls.temp_dir.cleanup() def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch cleanup(torch_device, gc_collect=True) def get_rag_config(self): question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base") generator_config = AutoConfig.from_pretrained("facebook/bart-large-cnn") return RagConfig.from_question_encoder_generator_configs( question_encoder_config, generator_config, bos_token_id=0, decoder_start_token_id=2, eos_token_id=2, is_encoder_decoder=True, pad_token_id=1, vocab_size=50264, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, dataset="wiki_dpr", dataset_split="train", index_name="custom", passages_path=self.dataset_path, index_path=self.index_path, use_dummy_dataset=True, retrieval_vector_size=768, retrieval_batch_size=8, dataset_revision="b24a417", ) @slow def test_rag_sequence_from_pretrained(self): rag_config = self.get_rag_config() rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained( "facebook/dpr-question_encoder-single-nq-base" ) rag_retriever = RagRetriever( rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer, ) input_ids = rag_question_encoder_tokenizer( "who sings does he love me with reba", return_tensors="pt" ).input_ids 
decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="pt").input_ids input_ids = input_ids.to(torch_device) decoder_input_ids = decoder_input_ids.to(torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: rag_sequence = RagSequenceForGeneration.from_pretrained_question_encoder_generator( "facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn", retriever=rag_retriever, config=rag_config, ).to(torch_device) # check that the from pretrained methods work rag_sequence.save_pretrained(tmp_dirname) rag_sequence.from_pretrained(tmp_dirname, retriever=rag_retriever) rag_sequence.to(torch_device) with torch.no_grad(): output = rag_sequence( input_ids, labels=decoder_input_ids, ) loss_pretrained = output.loss del rag_sequence question_encoder = AutoModel.from_pretrained("facebook/dpr-question_encoder-single-nq-base") generator = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn") rag_sequence = RagSequenceForGeneration( config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever ) rag_sequence.to(torch_device) with torch.no_grad(): output = rag_sequence( input_ids, labels=decoder_input_ids, ) loss_init = output.loss self.assertAlmostEqual(loss_pretrained.item(), loss_init.item(), places=4) @slow def test_rag_token_from_pretrained(self): rag_config = self.get_rag_config() rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained( "facebook/dpr-question_encoder-single-nq-base" ) rag_retriever = RagRetriever( rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer, ) input_ids = rag_question_encoder_tokenizer( "who sings does he love me with reba", return_tensors="pt" ).input_ids decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="pt").input_ids input_ids = input_ids.to(torch_device) decoder_input_ids = 
decoder_input_ids.to(torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: rag_token = RagTokenForGeneration.from_pretrained_question_encoder_generator( "facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn", retriever=rag_retriever, config=rag_config, question_encoder_max_length=200, generator_max_length=200, ).to(torch_device) # check that the from pretrained methods work rag_token.save_pretrained(tmp_dirname) rag_token.from_pretrained(tmp_dirname, retriever=rag_retriever) rag_token.to(torch_device) self.assertTrue(rag_token.question_encoder.config.max_length == 200) self.assertTrue(rag_token.generator.config.max_length == 200) with torch.no_grad(): output = rag_token( input_ids, labels=decoder_input_ids, ) loss_pretrained = output.loss del rag_token question_encoder = AutoModel.from_pretrained("facebook/dpr-question_encoder-single-nq-base") generator = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn") rag_token = RagTokenForGeneration( config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever ) rag_token.to(torch_device) with torch.no_grad(): output = rag_token( input_ids, labels=decoder_input_ids, ) loss_init = output.loss self.assertAlmostEqual(loss_pretrained.item(), loss_init.item(), places=4)
RagModelSaveLoadTests
python
dagster-io__dagster
python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/api/client.py
{ "start": 505, "end": 2872 }
class ____: """Test context for DG API commands.""" def __init__(self, client_factory: GraphQLClientFactory): self.client_factory = client_factory self.organization = TEST_ORGANIZATION self.deployment = TEST_DEPLOYMENT def create_dg_api_graphql_client( ctx: click.Context, config: DagsterPlusCliConfig, view_graphql: bool = False ) -> IGraphQLClient: """Create GraphQL client for DG API commands. This is the single entry point for GraphQL client creation in DG API commands. It checks for test context injection first, then handles authentication for normal usage. Args: ctx: Click context from the CLI command config: Config to use for creating the client view_graphql: If True, wrap the client with debugging output Returns: IGraphQLClient instance """ # Check if we have a test context with custom factory if ctx.obj and isinstance(ctx.obj, DgApiTestContext) and ctx.obj.client_factory: client = ctx.obj.client_factory(config) else: # For normal operation, validate token exists and create client if not config.user_token: raise click.UsageError( "A Dagster Cloud API token must be specified.\n\n" "You may specify a token by:\n" "- Providing the --api-token parameter\n" "- Setting the DAGSTER_CLOUD_API_TOKEN environment variable" ) # Normal operation: create real client from config client = DagsterPlusGraphQLClient.from_config(config) # Wrap with debug client if requested if view_graphql: from dagster_dg_cli.utils.plus.gql_client import DebugGraphQLClient client = DebugGraphQLClient(client) return client def create_dg_api_client(ctx: click.Context) -> IGraphQLClient: """Create GraphQL client for DG API commands with automatic config handling. This is a convenience function for deployment commands that handles both config creation and client creation in a single step. 
Args: ctx: Click context from the CLI command Returns: IGraphQLClient instance """ from dagster_dg_cli.cli.api.shared import get_config_for_api_command config = get_config_for_api_command(ctx) return create_dg_api_graphql_client(ctx, config)
DgApiTestContext
python
numba__numba
numba/tests/test_parallel_backend.py
{ "start": 8866, "end": 10370 }
class ____(TestParallelBackendBase): """ These are like the numba.tests.test_threadsafety tests but designed instead to torture the parallel backend. If a suitable backend is supplied via NUMBA_THREADING_LAYER these tests can be run directly. This test class cannot be run using the multiprocessing option to the test runner (i.e. `./runtests -m`) as daemon processes cannot have children. """ # NOTE: All tests are generated based on what a platform supports concurrent # execution wise from Python, irrespective of whether the native libraries # can actually handle the behaviour present. @classmethod def generate(cls): for p in cls.parallelism: for name, impl in cls.runners.items(): methname = "test_" + p + '_' + name def methgen(impl, p): def test_method(self): selfproc = multiprocessing.current_process() # daemonized processes cannot have children if selfproc.daemon: _msg = 'daemonized processes cannot have children' self.skipTest(_msg) else: self.run_compile(impl, parallelism=p) return test_method fn = methgen(impl, p) fn.__name__ = methname setattr(cls, methname, fn) TestParallelBackend.generate()
TestParallelBackend
python
scipy__scipy
scipy/signal/tests/test_ltisys.py
{ "start": 26980, "end": 31674 }
class ____: def test_initialization(self): # Check that all initializations work StateSpace(1, 1, 1, 1) StateSpace([1], [2], [3], [4]) StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]), np.array([[1, 0]]), np.array([[0]])) def test_conversion(self): # Check the conversion functions s = StateSpace(1, 2, 3, 4) assert isinstance(s.to_ss(), StateSpace) assert isinstance(s.to_tf(), TransferFunction) assert isinstance(s.to_zpk(), ZerosPolesGain) # Make sure copies work assert StateSpace(s) is not s assert s.to_ss() is not s def test_properties(self): # Test setters/getters for cross class properties. # This implicitly tests to_tf() and to_zpk() # Getters s = StateSpace(1, 1, 1, 1) xp_assert_equal(s.poles, [1.]) xp_assert_equal(s.zeros, [0.]) assert s.dt is None def test_operators(self): # Test +/-/* operators on systems class BadType: pass s1 = StateSpace(np.array([[-0.5, 0.7], [0.3, -0.8]]), np.array([[1], [0]]), np.array([[1, 0]]), np.array([[0]]), ) s2 = StateSpace(np.array([[-0.2, -0.1], [0.4, -0.1]]), np.array([[1], [0]]), np.array([[1, 0]]), np.array([[0]]) ) s_discrete = s1.to_discrete(0.1) s2_discrete = s2.to_discrete(0.2) s3_discrete = s2.to_discrete(0.1) # Impulse response t = np.linspace(0, 1, 100) u = np.zeros_like(t) u[0] = 1 # Test multiplication for typ in (int, float, complex, np.float32, np.complex128, np.array): xp_assert_close(lsim(typ(2) * s1, U=u, T=t)[1], typ(2) * lsim(s1, U=u, T=t)[1]) xp_assert_close(lsim(s1 * typ(2), U=u, T=t)[1], lsim(s1, U=u, T=t)[1] * typ(2)) xp_assert_close(lsim(s1 / typ(2), U=u, T=t)[1], lsim(s1, U=u, T=t)[1] / typ(2)) with assert_raises(TypeError): typ(2) / s1 xp_assert_close(lsim(s1 * 2, U=u, T=t)[1], lsim(s1, U=2 * u, T=t)[1]) xp_assert_close(lsim(s1 * s2, U=u, T=t)[1], lsim(s1, U=lsim(s2, U=u, T=t)[1], T=t)[1], atol=1e-5) with assert_raises(TypeError): s1 / s1 with assert_raises(TypeError): s1 * s_discrete with assert_raises(TypeError): # Check different discretization constants s_discrete * s2_discrete with 
assert_raises(TypeError): s1 * BadType() with assert_raises(TypeError): BadType() * s1 with assert_raises(TypeError): s1 / BadType() with assert_raises(TypeError): BadType() / s1 # Test addition xp_assert_close(lsim(s1 + 2, U=u, T=t)[1], 2 * u + lsim(s1, U=u, T=t)[1]) # Check for dimension mismatch with assert_raises(ValueError): s1 + np.array([1, 2]) with assert_raises(ValueError): np.array([1, 2]) + s1 with assert_raises(TypeError): s1 + s_discrete with assert_raises(ValueError): s1 / np.array([[1, 2], [3, 4]]) with assert_raises(TypeError): # Check different discretization constants s_discrete + s2_discrete with assert_raises(TypeError): s1 + BadType() with assert_raises(TypeError): BadType() + s1 xp_assert_close(lsim(s1 + s2, U=u, T=t)[1], lsim(s1, U=u, T=t)[1] + lsim(s2, U=u, T=t)[1]) # Test subtraction xp_assert_close(lsim(s1 - 2, U=u, T=t)[1], -2 * u + lsim(s1, U=u, T=t)[1]) xp_assert_close(lsim(2 - s1, U=u, T=t)[1], 2 * u + lsim(-s1, U=u, T=t)[1]) xp_assert_close(lsim(s1 - s2, U=u, T=t)[1], lsim(s1, U=u, T=t)[1] - lsim(s2, U=u, T=t)[1]) with assert_raises(TypeError): s1 - BadType() with assert_raises(TypeError): BadType() - s1 s = s_discrete + s3_discrete assert s.dt == 0.1 s = s_discrete * s3_discrete assert s.dt == 0.1 s = 3 * s_discrete assert s.dt == 0.1 s = -s_discrete assert s.dt == 0.1
TestStateSpace
python
PyCQA__pylint
tests/functional/a/alternative/alternative_union_syntax.py
{ "start": 1746, "end": 1793 }
class ____: my_var: int | str
CustomDataClass4
python
keras-team__keras
keras/src/layers/rnn/stacked_rnn_cells_test.py
{ "start": 212, "end": 10380 }
class ____(testing.TestCase): @pytest.mark.requires_trainable_backend def test_basics(self): self.run_layer_test( layers.RNN, init_kwargs={ "cell": [ OneStateRNNCell(3), OneStateRNNCell(4), OneStateRNNCell(5), ], }, input_shape=(2, 3, 4), expected_output_shape=(2, 5), expected_num_trainable_weights=6, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, custom_objects={"OneStateRNNCell": OneStateRNNCell}, ) self.run_layer_test( layers.RNN, init_kwargs={ "cell": [ OneStateRNNCell(3), OneStateRNNCell(4), OneStateRNNCell(5), ], "return_sequences": True, }, input_shape=(2, 3, 4), expected_output_shape=(2, 3, 5), expected_num_trainable_weights=6, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, custom_objects={"OneStateRNNCell": OneStateRNNCell}, ) # Two-state case. self.run_layer_test( layers.RNN, init_kwargs={ "cell": [ TwoStatesRNNCell(3), TwoStatesRNNCell(4), TwoStatesRNNCell(5), ], }, input_shape=(2, 3, 4), expected_output_shape=(2, 5), expected_num_trainable_weights=9, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, custom_objects={"TwoStatesRNNCell": TwoStatesRNNCell}, ) self.run_layer_test( layers.RNN, init_kwargs={ "cell": [ TwoStatesRNNCell(3), TwoStatesRNNCell(4), TwoStatesRNNCell(5), ], "return_sequences": True, }, input_shape=(2, 3, 4), expected_output_shape=(2, 3, 5), expected_num_trainable_weights=9, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, custom_objects={"TwoStatesRNNCell": TwoStatesRNNCell}, ) self.run_layer_test( layers.RNN, init_kwargs={ "cell": [ layers.SimpleRNNCell(3, dropout=0.1, recurrent_dropout=0.1), layers.SimpleRNNCell(4, dropout=0.1, recurrent_dropout=0.1), layers.SimpleRNNCell(5, dropout=0.1, recurrent_dropout=0.1), ], "return_sequences": True, }, input_shape=(2, 3, 4), expected_output_shape=(2, 3, 5), expected_num_trainable_weights=9, 
expected_num_non_trainable_weights=0, expected_num_seed_generators=3, supports_masking=True, ) self.run_layer_test( layers.RNN, init_kwargs={ "cell": [ layers.GRUCell(3, dropout=0.1, recurrent_dropout=0.1), layers.GRUCell(4, dropout=0.1, recurrent_dropout=0.1), layers.GRUCell(5, dropout=0.1, recurrent_dropout=0.1), ], "return_sequences": True, }, input_shape=(2, 3, 4), expected_output_shape=(2, 3, 5), expected_num_trainable_weights=9, expected_num_non_trainable_weights=0, expected_num_seed_generators=3, supports_masking=True, ) self.run_layer_test( layers.RNN, init_kwargs={ "cell": [ layers.LSTMCell(3, dropout=0.1, recurrent_dropout=0.1), layers.LSTMCell(4, dropout=0.1, recurrent_dropout=0.1), layers.LSTMCell(5, dropout=0.1, recurrent_dropout=0.1), ], "return_sequences": True, }, input_shape=(2, 3, 4), expected_output_shape=(2, 3, 5), expected_num_trainable_weights=9, expected_num_non_trainable_weights=0, expected_num_seed_generators=3, supports_masking=True, ) def test_correctness_single_state_stack(self): sequence = np.arange(24).reshape((2, 3, 4)).astype("float32") layer = layers.RNN([OneStateRNNCell(3), OneStateRNNCell(2)]) output = layer(sequence) self.assertAllClose( np.array([[786.0, 786.0], [4386.0, 4386.0]]), output ) layer = layers.RNN( [OneStateRNNCell(3), OneStateRNNCell(2)], return_sequences=True ) output = layer(sequence) self.assertAllClose( np.array( [ [[18.0, 18.0], [156.0, 156.0], [786.0, 786.0]], [[162.0, 162.0], [1020.0, 1020.0], [4386.0, 4386.0]], ] ), output, ) layer = layers.RNN( [OneStateRNNCell(3), OneStateRNNCell(2)], return_state=True ) output, state_1, state_2 = layer(sequence) self.assertAllClose( np.array([[786.0, 786.0], [4386.0, 4386.0]]), output ) self.assertAllClose( np.array([[158.0, 158.0, 158.0], [782.0, 782.0, 782.0]]), state_1 ) self.assertAllClose( np.array([[786.0, 786.0], [4386.0, 4386.0]]), state_2 ) layer = layers.RNN( [OneStateRNNCell(3), OneStateRNNCell(2)], return_sequences=True, return_state=True, ) output, state_1, 
state_2 = layer(sequence) self.assertAllClose( np.array( [ [[18.0, 18.0], [156.0, 156.0], [786.0, 786.0]], [[162.0, 162.0], [1020.0, 1020.0], [4386.0, 4386.0]], ] ), output, ) self.assertAllClose( np.array([[158.0, 158.0, 158.0], [782.0, 782.0, 782.0]]), state_1 ) self.assertAllClose( np.array([[786.0, 786.0], [4386.0, 4386.0]]), state_2 ) def test_correctness_two_states_stack(self): sequence = np.arange(24).reshape((2, 3, 4)).astype("float32") layer = layers.RNN([TwoStatesRNNCell(3), TwoStatesRNNCell(2)]) output = layer(sequence) self.assertAllClose( np.array([[3144.0, 3144.0], [17544.0, 17544.0]]), output ) layer = layers.RNN( [TwoStatesRNNCell(3), TwoStatesRNNCell(2)], return_sequences=True ) output = layer(sequence) self.assertAllClose( np.array( [ [[72.0, 72.0], [624.0, 624.0], [3144.0, 3144.0]], [[648.0, 648.0], [4080.0, 4080.0], [17544.0, 17544.0]], ] ), output, ) layer = layers.RNN( [TwoStatesRNNCell(3), TwoStatesRNNCell(2)], return_state=True ) output, state_1, state_2 = layer(sequence) self.assertAllClose( np.array([[3144.0, 3144.0], [17544.0, 17544.0]]), output ) self.assertAllClose( np.array([[158.0, 158.0, 158.0], [782.0, 782.0, 782.0]]), state_1[0] ) self.assertAllClose( np.array([[158.0, 158.0, 158.0], [782.0, 782.0, 782.0]]), state_1[1] ) self.assertAllClose( np.array([[1572.0, 1572.0], [8772.0, 8772.0]]), state_2[0] ) self.assertAllClose( np.array([[1572.0, 1572.0], [8772.0, 8772.0]]), state_2[1] ) def test_statefullness_single_state_stack(self): sequence = np.arange(24).reshape((2, 3, 4)).astype("float32") layer = layers.RNN( [OneStateRNNCell(3), OneStateRNNCell(2)], stateful=True ) layer(sequence) output = layer(sequence) self.assertAllClose( np.array([[34092.0, 34092.0], [173196.0, 173196.0]]), output ) def test_statefullness_two_states_stack(self): sequence = np.arange(24).reshape((2, 3, 4)).astype("float32") layer = layers.RNN( [TwoStatesRNNCell(3), TwoStatesRNNCell(2)], stateful=True ) layer(sequence) output = layer(sequence) 
self.assertAllClose( np.array([[136368.0, 136368.0], [692784.0, 692784.0]]), output ) def test_return_state_stacked_lstm_cell(self): layer = layers.RNN( [layers.LSTMCell(10), layers.LSTMCell(10)], return_state=True ) out = layer(np.zeros((2, 3, 5))) self.assertLen(out, 3) self.assertEqual(out[0].shape, (2, 10)) self.assertEqual(out[1][0].shape, (2, 10)) self.assertEqual(out[1][1].shape, (2, 10)) self.assertEqual(out[2][0].shape, (2, 10)) self.assertEqual(out[2][1].shape, (2, 10)) shape = layer.compute_output_shape((2, 3, 5)) self.assertLen(shape, 3) self.assertEqual(shape[0], (2, 10)) self.assertEqual(shape[1][0], (2, 10)) self.assertEqual(shape[1][1], (2, 10)) self.assertEqual(shape[2][0], (2, 10)) self.assertEqual(shape[2][1], (2, 10)) def test_stacked_lstm_cell_mask(self): sequence = np.ones((2, 3, 4)) mask = np.array([[True, True, True], [True, True, False]]) cell_kwargs = dict( units=1, kernel_initializer="ones", recurrent_initializer="ones" ) rnn_cells = [layers.LSTMCell(**cell_kwargs) for _ in range(2)] stacked_rnn = layers.RNN(rnn_cells) output = stacked_rnn(sequence, mask=mask) self.assertAllClose(np.array([[0.7793], [0.5998]]), output, atol=1e-4)
StackedRNNTest
python
ray-project__ray
rllib/examples/envs/classes/repeat_initial_obs_env.py
{ "start": 79, "end": 905 }
class ____(gym.Env): """Env in which the initial observation has to be repeated all the time. Runs for n steps. r=1 if action correct, -1 otherwise (max. R=100). """ def __init__(self, episode_len=100): self.observation_space = Discrete(2) self.action_space = Discrete(2) self.token = None self.episode_len = episode_len self.num_steps = 0 def reset(self, *, seed=None, options=None): self.token = random.choice([0, 1]) self.num_steps = 0 return self.token, {} def step(self, action): if action == self.token: reward = 1 else: reward = -1 self.num_steps += 1 done = truncated = self.num_steps >= self.episode_len return 0, reward, done, truncated, {}
RepeatInitialObsEnv
python
apache__airflow
airflow-core/src/airflow/api_fastapi/core_api/datamodels/assets.py
{ "start": 4237, "end": 4482 }
class ____(BaseModel): """Queued Event serializer for responses..""" dag_id: str asset_id: int created_at: datetime dag_display_name: str = Field(validation_alias=AliasPath("dag_model", "dag_display_name"))
QueuedEventResponse
python
apache__airflow
airflow-core/src/airflow/api_fastapi/core_api/datamodels/xcom.py
{ "start": 2705, "end": 2856 }
class ____(StrictBaseModel): """Payload serializer for creating an XCom entry.""" key: str value: Any map_index: int = -1
XComCreateBody
python
huggingface__transformers
src/transformers/models/glm4v_moe/modeling_glm4v_moe.py
{ "start": 3216, "end": 4154 }
class ____(ModelOutput): r""" past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): The rope index difference between sequence length and multimodal rope. """ last_hidden_state: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None rope_deltas: Optional[torch.LongTensor] = None
Glm4vMoeModelOutputWithPast
python
kamyu104__LeetCode-Solutions
Python/minimum-substring-partition-of-equal-character-frequency.py
{ "start": 62, "end": 669 }
class ____(object): def minimumSubstringsInPartition(self, s): """ :type s: str :rtype: int """ INF = float("inf") dp = [INF]*(len(s)+1) dp[0] = 0 for i in xrange(len(s)): cnt = [0]*26 d = mx = 0 for j in reversed(xrange(i+1)): k = ord(s[j])-ord('a') if cnt[k] == 0: d += 1 cnt[k] += 1 mx = max(mx, cnt[k]) if d*mx == i-j+1: dp[i+1] = min(dp[i+1], dp[j]+1) return dp[-1]
Solution
python
numba__numba
numba/cuda/cudadrv/driver.py
{ "start": 92747, "end": 96216 }
class ____(Linker): """ Links for current device if no CC given """ def __init__(self, max_registers=0, lineinfo=False, cc=None): super().__init__(max_registers, lineinfo, cc) logsz = config.CUDA_LOG_SIZE linkerinfo = (c_char * logsz)() linkererrors = (c_char * logsz)() options = { enums.CU_JIT_INFO_LOG_BUFFER: addressof(linkerinfo), enums.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES: c_void_p(logsz), enums.CU_JIT_ERROR_LOG_BUFFER: addressof(linkererrors), enums.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES: c_void_p(logsz), enums.CU_JIT_LOG_VERBOSE: c_void_p(1), } if max_registers: options[enums.CU_JIT_MAX_REGISTERS] = c_void_p(max_registers) if lineinfo: options[enums.CU_JIT_GENERATE_LINE_INFO] = c_void_p(1) if cc is None: # No option value is needed, but we need something as a placeholder options[enums.CU_JIT_TARGET_FROM_CUCONTEXT] = 1 else: cc_val = cc[0] * 10 + cc[1] options[enums.CU_JIT_TARGET] = c_void_p(cc_val) raw_keys = list(options.keys()) raw_values = list(options.values()) option_keys = (drvapi.cu_jit_option * len(raw_keys))(*raw_keys) option_vals = (c_void_p * len(raw_values))(*raw_values) self.handle = handle = drvapi.cu_link_state() driver.cuLinkCreate(len(raw_keys), option_keys, option_vals, byref(self.handle)) weakref.finalize(self, driver.cuLinkDestroy, handle) self.linker_info_buf = linkerinfo self.linker_errors_buf = linkererrors self._keep_alive = [linkerinfo, linkererrors, option_keys, option_vals] @property def info_log(self): return self.linker_info_buf.value.decode('utf8') @property def error_log(self): return self.linker_errors_buf.value.decode('utf8') def add_ptx(self, ptx, name='<cudapy-ptx>'): ptxbuf = c_char_p(ptx) namebuf = c_char_p(name.encode('utf8')) self._keep_alive += [ptxbuf, namebuf] try: driver.cuLinkAddData(self.handle, enums.CU_JIT_INPUT_PTX, ptxbuf, len(ptx), namebuf, 0, None, None) except CudaAPIError as e: raise LinkerError("%s\n%s" % (e, self.error_log)) def add_file(self, path, kind): pathbuf = c_char_p(path.encode("utf8")) 
self._keep_alive.append(pathbuf) try: driver.cuLinkAddFile(self.handle, kind, pathbuf, 0, None, None) except CudaAPIError as e: if e.code == enums.CUDA_ERROR_FILE_NOT_FOUND: msg = f'{path} not found' else: msg = "%s\n%s" % (e, self.error_log) raise LinkerError(msg) def complete(self): cubin_buf = c_void_p(0) size = c_size_t(0) try: driver.cuLinkComplete(self.handle, byref(cubin_buf), byref(size)) except CudaAPIError as e: raise LinkerError("%s\n%s" % (e, self.error_log)) size = size.value assert size > 0, 'linker returned a zero sized cubin' del self._keep_alive[:] # We return a copy of the cubin because it's owned by the linker cubin_ptr = ctypes.cast(cubin_buf, ctypes.POINTER(ctypes.c_char)) return bytes(np.ctypeslib.as_array(cubin_ptr, shape=(size,)))
CtypesLinker
python
getsentry__sentry
tests/sentry/api/test_utils.py
{ "start": 6369, "end": 8765 }
class ____(APITestCase): @patch("sentry.api.utils.ParseError") def test_handle_query_errors(self, mock_parse_error: MagicMock) -> None: exceptions = [ DatasetSelectionError, IncompatibleMetricsQuery, InvalidSearchQuery, QueryConnectionFailed, QueryExecutionError, QueryExecutionTimeMaximum, QueryIllegalTypeOfArgument, QueryMemoryLimitExceeded, QueryMissingColumn, QueryOutsideRetentionError, QuerySizeExceeded, QueryTooManySimultaneous, RateLimitExceeded, SchemaValidationError, SnubaError, UnqualifiedQueryError, ] mock_parse_error.return_value = FooBarError() for ex in exceptions: try: with handle_query_errors(): raise ex except Exception as e: assert isinstance(e, (FooBarError, APIException)) def test_handle_postgres_timeout(self) -> None: class TimeoutError(OperationalError): def __str__(self) -> str: return "canceling statement due to statement timeout" try: with handle_query_errors(): raise TimeoutError() except Exception as e: assert isinstance(e, Throttled) assert ( str(e) == "Query timeout. Please try with a smaller date range or fewer conditions." ) def test_handle_postgres_user_cancel(self) -> None: class UserCancelError(OperationalError): def __str__(self) -> str: return "canceling statement due to user request" try: with handle_query_errors(): raise UserCancelError() except Exception as e: assert isinstance(e, UserCancelError) # Should propagate original error @patch("sentry.api.utils.ParseError") def test_handle_other_operational_error(self, mock_parse_error: MagicMock) -> None: class OtherError(OperationalError): pass try: with handle_query_errors(): raise OtherError() except Exception as e: assert isinstance(e, OtherError) # Should propagate original error mock_parse_error.assert_not_called()
HandleQueryErrorsTest
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/postgresql/types.py
{ "start": 4193, "end": 4423 }
class ____(sqltypes.TypeEngine[str]): """Provide the PostgreSQL TSQUERY type. .. versionadded:: 2.0.0rc1 """ __visit_name__ = "TSQUERY" operator_classes = OperatorClass.BASE | OperatorClass.COMPARISON
TSQUERY
python
openai__openai-python
src/openai/types/eval_create_params.py
{ "start": 4684, "end": 5307 }
class ____(TypedDict, total=False): content: Required[TestingCriterionLabelModelInputEvalItemContent] """Inputs to the model - can contain template strings.""" role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. One of `user`, `assistant`, `system`, or `developer`. """ type: Literal["message"] """The type of the message input. Always `message`.""" TestingCriterionLabelModelInput: TypeAlias = Union[ TestingCriterionLabelModelInputSimpleInputMessage, TestingCriterionLabelModelInputEvalItem ]
TestingCriterionLabelModelInputEvalItem
python
great-expectations__great_expectations
great_expectations/data_context/_version_checker.py
{ "start": 218, "end": 272 }
class ____(TypedDict): version: str
_PyPIPackageInfo
python
numba__numba
numba/tests/test_linalg.py
{ "start": 86295, "end": 88450 }
class ____(TestLinalgBase): """ Tests for np.linalg.matrix_power. """ def assert_int_exponenent(self, cfunc, args): # validate first arg is ok cfunc(args[0], 1) # pass in both args and assert fail with self.assertRaises(errors.TypingError): cfunc(*args) @needs_lapack def test_linalg_matrix_power(self): cfunc = jit(nopython=True)(matrix_power_matrix) def check(a, pwr): expected = matrix_power_matrix(a, pwr) got = cfunc(a, pwr) # check that the computed results are contig and in the same way self.assert_contig_sanity(got, "C") res = 7 * np.finfo(a.dtype).resolution np.testing.assert_allclose(got, expected, rtol=res, atol=res) # Ensure proper resource management with self.assertNoNRTLeak(): cfunc(a, pwr) sizes = [(1, 1), (5, 5), (7, 7)] powers = [-33, -17] + list(range(-10, 10)) + [17, 33] for size, pwr, dtype, order in \ product(sizes, powers, self.dtypes, 'FC'): a = self.specific_sample_matrix(size, dtype, order) check(a, pwr) a = np.empty((0, 0), dtype=dtype, order=order) check(a, pwr) rn = "matrix_power" # Wrong dtype self.assert_wrong_dtype(rn, cfunc, (np.ones((2, 2), dtype=np.int32), 1)) # not an int power self.assert_wrong_dtype(rn, cfunc, (np.ones((2, 2), dtype=np.int32), 1)) # non square system args = (np.ones((3, 5)), 1) msg = 'input must be a square array' self.assert_error(cfunc, args, msg) # Dimension issue self.assert_wrong_dimensions(rn, cfunc, (np.ones(10, dtype=np.float64), 1)) # non-integer supplied as exponent self.assert_int_exponenent(cfunc, (np.ones((2, 2)), 1.2)) # singular matrix is not invertible self.assert_raise_on_singular(cfunc, (np.array([[0., 0], [1, 1]]), -1))
TestLinalgMatrixPower
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/classes5.py
{ "start": 4989, "end": 5079 }
class ____(ParentClass2): cv_decl_1, cv_decl_2, cv_decl_3 = (3, 4.5, 6.0)
SubclassTuple1
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/resource_requirement.py
{ "start": 3739, "end": 4424 }
class ____(ResourceKeyRequirement): key: str # pyright: ignore[reportIncompatibleMethodOverride] node_description: str input_name: str root_input: bool @property def expected_type(self) -> type: from dagster._core.storage.io_manager import IInputManagerDefinition return IInputManagerDefinition def describe_requirement(self) -> str: return ( f"input manager with key '{self.key}' required by input '{self.input_name}' of" f" {self.node_description}" ) # The ResourceRequirement for unexecutable external assets. Is an analogue to # `SourceAssetIOManagerRequirement`. @record
InputManagerRequirement
python
doocs__leetcode
solution/0600-0699/0659.Split Array into Consecutive Subsequences/Solution.py
{ "start": 0, "end": 313 }
class ____: def isPossible(self, nums: List[int]) -> bool: d = defaultdict(list) for v in nums: if h := d[v - 1]: heappush(d[v], heappop(h) + 1) else: heappush(d[v], 1) return all(not v or v and v[0] > 2 for v in d.values())
Solution
python
getsentry__sentry
src/sentry/web/frontend/debug/debug_oauth_authorize.py
{ "start": 287, "end": 1096 }
class ____(View): def get(self, request: HttpRequest) -> HttpResponse: application = ApiApplication( name="Example Application", homepage_url="http://example.com", terms_url="http://example.com/terms", privacy_url="http://example.com/privacy", ) return render_to_response( "sentry/oauth-authorize.html", { "user": request.user, "application": application, "scopes": ["org:read", "project:write"], "permissions": [ "Read access to organization details.", "Read and write access to projects.", ], }, request, ) @internal_region_silo_view
DebugOAuthAuthorizeView
python
nedbat__coveragepy
tests/test_cmdline.py
{ "start": 7751, "end": 38299 }
class ____(BaseCmdLineTest): """Tests of the coverage.py command line.""" def test_annotate(self) -> None: # coverage annotate [-d DIR] [-i] [--omit DIR,...] [FILE1 FILE2 ...] self.cmd_executes( "annotate", """\ cov = Coverage() cov.load() cov.annotate() """, ) self.cmd_executes( "annotate -d dir1", """\ cov = Coverage() cov.load() cov.annotate(directory="dir1") """, ) self.cmd_executes( "annotate -i", """\ cov = Coverage() cov.load() cov.annotate(ignore_errors=True) """, ) self.cmd_executes( "annotate --omit fooey", """\ cov = Coverage(omit=["fooey"]) cov.load() cov.annotate(omit=["fooey"]) """, ) self.cmd_executes( "annotate --omit fooey,booey", """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.annotate(omit=["fooey", "booey"]) """, ) self.cmd_executes( "annotate mod1", """\ cov = Coverage() cov.load() cov.annotate(morfs=["mod1"]) """, ) self.cmd_executes( "annotate mod1 mod2 mod3", """\ cov = Coverage() cov.load() cov.annotate(morfs=["mod1", "mod2", "mod3"]) """, ) def test_combine(self) -> None: # coverage combine with args self.cmd_executes( "combine datadir1", """\ cov = Coverage() cov.combine(["datadir1"], strict=True, keep=False) cov.save() """, ) # coverage combine, appending self.cmd_executes( "combine --append datadir1", """\ cov = Coverage() cov.load() cov.combine(["datadir1"], strict=True, keep=False) cov.save() """, ) # coverage combine without args self.cmd_executes( "combine", """\ cov = Coverage() cov.combine(None, strict=True, keep=False) cov.save() """, ) # coverage combine quietly self.cmd_executes( "combine -q", """\ cov = Coverage(messages=False) cov.combine(None, strict=True, keep=False) cov.save() """, ) self.cmd_executes( "combine --quiet", """\ cov = Coverage(messages=False) cov.combine(None, strict=True, keep=False) cov.save() """, ) self.cmd_executes( "combine --data-file=foo.cov", """\ cov = Coverage(data_file="foo.cov") cov.combine(None, strict=True, keep=False) cov.save() """, ) def 
test_combine_doesnt_confuse_options_with_args(self) -> None: # https://github.com/coveragepy/coveragepy/issues/385 self.cmd_executes( "combine --rcfile cov.ini", """\ cov = Coverage(config_file='cov.ini') cov.combine(None, strict=True, keep=False) cov.save() """, ) self.cmd_executes( "combine --rcfile cov.ini data1 data2/more", """\ cov = Coverage(config_file='cov.ini') cov.combine(["data1", "data2/more"], strict=True, keep=False) cov.save() """, ) @pytest.mark.parametrize( "cmd, output", [ ( "debug", "What information would you like: config, data, sys, premain, pybehave, sqlite?", ), ("debug foo", "Don't know what you mean by 'foo'"), ("debug sys config", "Only one topic at a time, please"), ], ) def test_debug(self, cmd: str, output: str) -> None: self.cmd_help(cmd, output) def test_debug_sys(self) -> None: self.command_line("debug sys") out = self.stdout() assert "version:" in out assert "data_file:" in out def test_debug_config(self) -> None: self.command_line("debug config") out = self.stdout() assert "cover_pylib:" in out assert "skip_covered:" in out assert "skip_empty:" in out def test_debug_pybehave(self) -> None: self.command_line("debug pybehave") out = self.stdout() assert " CPYTHON:" in out assert " PYVERSION:" in out assert " deferred_annotations:" in out # Some things that shouldn't appear.. assert "typing." not in out # import from typing assert ": <" not in out # objects without a good repr # It should report PYVERSION correctly. pyversion = re_line(r" PYVERSION:", out) vtuple = ast.literal_eval(pyversion.partition(":")[-1].strip()) assert vtuple[:5] == sys.version_info def test_debug_premain(self) -> None: self.command_line("debug premain") out = self.stdout() # -- premain --------------------------------------------------- # ... many lines ... 
# _multicall : /Users/ned/cov/trunk/.tox/py39/site-packages/pluggy/_callers.py:77 # pytest_pyfunc_call : /Users/ned/cov/trunk/.tox/py39/site-packages/_pytest/python.py:183 # test_debug_premain : /Users/ned/cov/trunk/tests/test_cmdline.py:284 # command_line : /Users/ned/cov/trunk/tests/coveragetest.py:309 # command_line : /Users/ned/cov/trunk/tests/coveragetest.py:472 # command_line : /Users/ned/cov/trunk/coverage/cmdline.py:592 # do_debug : /Users/ned/cov/trunk/coverage/cmdline.py:804 lines = out.splitlines() s = re.escape(os.sep) assert lines[0].startswith("-- premain ----") assert len(lines) > 25 assert re.search(rf"{s}site-packages{s}_pytest{s}", out) assert re.search(rf"{s}site-packages{s}pluggy{s}", out) assert re.search(rf"(?m)^\s+test_debug_premain : .*{s}tests{s}test_cmdline.py:\d+$", out) assert re.search(rf"(?m)^\s+command_line : .*{s}coverage{s}cmdline.py:\d+$", out) assert re.search(rf"(?m)^\s+do_debug : .*{s}coverage{s}cmdline.py:\d+$", out) assert "do_debug : " in lines[-1] def test_debug_sqlite(self) -> None: self.command_line("debug sqlite") out = self.stdout() assert "sqlite3_sqlite_version:" in out assert "sqlite3_compile_options:" in out assert len(out.splitlines()) > 15 # Lots of lines of indented SQLite compile-time options. 
assert len(re_lines(r"^ {20,35}[A-Z]{3}", out)) > 12 def test_erase(self) -> None: # coverage erase self.cmd_executes( "erase", """\ cov = Coverage() cov.erase() """, ) self.cmd_executes( "erase --data-file=foo.cov", """\ cov = Coverage(data_file="foo.cov") cov.erase() """, ) def test_version(self) -> None: # coverage --version self.cmd_help("--version", topic="version", ret=OK) def test_help_option(self) -> None: # coverage -h self.cmd_help("-h", topic="help", ret=OK) self.cmd_help("--help", topic="help", ret=OK) def test_help_command(self) -> None: self.cmd_executes("help", "show_help(topic='help')") def test_cmd_help(self) -> None: self.cmd_executes("run --help", "show_help(parser='<CmdOptionParser:run>')") self.cmd_executes_same("help run", "run --help") def test_html(self) -> None: # coverage html -d DIR [-i] [--omit DIR,...] [FILE1 FILE2 ...] self.cmd_executes( "html", """\ cov = Coverage() cov.load() cov.html_report() """, ) self.cmd_executes( "html -d dir1", """\ cov = Coverage() cov.load() cov.html_report(directory="dir1") """, ) self.cmd_executes( "html -i", """\ cov = Coverage() cov.load() cov.html_report(ignore_errors=True) """, ) self.cmd_executes( "html --omit fooey", """\ cov = Coverage(omit=["fooey"]) cov.load() cov.html_report(omit=["fooey"]) """, ) self.cmd_executes( "html --omit fooey,booey", """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.html_report(omit=["fooey", "booey"]) """, ) self.cmd_executes( "html mod1", """\ cov = Coverage() cov.load() cov.html_report(morfs=["mod1"]) """, ) self.cmd_executes( "html mod1 mod2 mod3", """\ cov = Coverage() cov.load() cov.html_report(morfs=["mod1", "mod2", "mod3"]) """, ) self.cmd_executes( "html --precision=3", """\ cov = Coverage() cov.load() cov.html_report(precision=3) """, ) self.cmd_executes( "html --title=Hello_there", """\ cov = Coverage() cov.load() cov.html_report(title='Hello_there') """, ) self.cmd_executes( "html -q", """\ cov = Coverage(messages=False) cov.load() cov.html_report() 
""", ) self.cmd_executes( "html --quiet", """\ cov = Coverage(messages=False) cov.load() cov.html_report() """, ) def test_json(self) -> None: # coverage json [-i] [--omit DIR,...] [FILE1 FILE2 ...] self.cmd_executes( "json", """\ cov = Coverage() cov.load() cov.json_report() """, ) self.cmd_executes( "json --pretty-print", """\ cov = Coverage() cov.load() cov.json_report(pretty_print=True) """, ) self.cmd_executes( "json --pretty-print --show-contexts", """\ cov = Coverage() cov.load() cov.json_report(pretty_print=True, show_contexts=True) """, ) self.cmd_executes( "json -i", """\ cov = Coverage() cov.load() cov.json_report(ignore_errors=True) """, ) self.cmd_executes( "json -o myjson.foo", """\ cov = Coverage() cov.load() cov.json_report(outfile="myjson.foo") """, ) self.cmd_executes( "json -o -", """\ cov = Coverage() cov.load() cov.json_report(outfile="-") """, ) self.cmd_executes( "json --omit fooey", """\ cov = Coverage(omit=["fooey"]) cov.load() cov.json_report(omit=["fooey"]) """, ) self.cmd_executes( "json --omit fooey,booey", """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.json_report(omit=["fooey", "booey"]) """, ) self.cmd_executes( "json mod1", """\ cov = Coverage() cov.load() cov.json_report(morfs=["mod1"]) """, ) self.cmd_executes( "json mod1 mod2 mod3", """\ cov = Coverage() cov.load() cov.json_report(morfs=["mod1", "mod2", "mod3"]) """, ) self.cmd_executes( "json -q", """\ cov = Coverage(messages=False) cov.load() cov.json_report() """, ) self.cmd_executes( "json --quiet", """\ cov = Coverage(messages=False) cov.load() cov.json_report() """, ) def test_lcov(self) -> None: # coverage lcov [-i] [--omit DIR,...] [FILE1 FILE2 ...] 
self.cmd_executes( "lcov", """\ cov = Coverage() cov.load() cov.lcov_report() """, ) self.cmd_executes( "lcov -i", """\ cov = Coverage() cov.load() cov.lcov_report(ignore_errors=True) """, ) self.cmd_executes( "lcov -o mylcov.foo", """\ cov = Coverage() cov.load() cov.lcov_report(outfile="mylcov.foo") """, ) self.cmd_executes( "lcov -o -", """\ cov = Coverage() cov.load() cov.lcov_report(outfile="-") """, ) self.cmd_executes( "lcov --omit fooey", """\ cov = Coverage(omit=["fooey"]) cov.load() cov.lcov_report(omit=["fooey"]) """, ) self.cmd_executes( "lcov --omit fooey,booey", """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.lcov_report(omit=["fooey", "booey"]) """, ) self.cmd_executes( "lcov -q", """\ cov = Coverage(messages=False) cov.load() cov.lcov_report() """, ) self.cmd_executes( "lcov --quiet", """\ cov = Coverage(messages=False) cov.load() cov.lcov_report() """, ) def test_report(self) -> None: # coverage report [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...] self.cmd_executes( "report", """\ cov = Coverage() cov.load() cov.report(show_missing=None) """, ) self.cmd_executes( "report -i", """\ cov = Coverage() cov.load() cov.report(ignore_errors=True) """, ) self.cmd_executes( "report -m", """\ cov = Coverage() cov.load() cov.report(show_missing=True) """, ) self.cmd_executes( "report --omit fooey", """\ cov = Coverage(omit=["fooey"]) cov.load() cov.report(omit=["fooey"]) """, ) self.cmd_executes( "report --omit fooey,booey", """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.report(omit=["fooey", "booey"]) """, ) self.cmd_executes( "report mod1", """\ cov = Coverage() cov.load() cov.report(morfs=["mod1"]) """, ) self.cmd_executes( "report mod1 mod2 mod3", """\ cov = Coverage() cov.load() cov.report(morfs=["mod1", "mod2", "mod3"]) """, ) self.cmd_executes( "report --precision=7", """\ cov = Coverage() cov.load() cov.report(precision=7) """, ) self.cmd_executes( "report --skip-covered", """\ cov = Coverage() cov.load() 
cov.report(skip_covered=True) """, ) self.cmd_executes( "report --skip-covered --no-skip-covered", """\ cov = Coverage() cov.load() cov.report(skip_covered=False) """, ) self.cmd_executes( "report --no-skip-covered", """\ cov = Coverage() cov.load() cov.report(skip_covered=False) """, ) self.cmd_executes( "report --skip-empty", """\ cov = Coverage() cov.load() cov.report(skip_empty=True) """, ) self.cmd_executes( "report --contexts=foo,bar", """\ cov = Coverage() cov.load() cov.report(contexts=["foo", "bar"]) """, ) self.cmd_executes( "report --sort=-foo", """\ cov = Coverage() cov.load() cov.report(sort='-foo') """, ) self.cmd_executes( "report --data-file=foo.cov.2", """\ cov = Coverage(data_file="foo.cov.2") cov.load() cov.report(show_missing=None) """, ) self.cmd_executes( "report --format=markdown", """\ cov = Coverage() cov.load() cov.report(output_format="markdown") """, ) def test_run(self) -> None: # coverage run [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...] # run calls coverage.erase first. self.cmd_executes( "run foo.py", """\ cov = Coverage() runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, ) # run -a combines with an existing data file before saving. self.cmd_executes( "run -a foo.py", """\ cov = Coverage() runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.load() cov.start() runner.run() cov.stop() cov.save() """, ) # --timid sets a flag, and program arguments get passed through. self.cmd_executes( "run --timid foo.py abc 123", """\ cov = Coverage(timid=True) runner = PyRunner(['foo.py', 'abc', '123'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, ) # -L sets a flag, and flags for the program don't confuse us. 
self.cmd_executes( "run -p -L foo.py -a -b", """\ cov = Coverage(cover_pylib=True, data_suffix=True) runner = PyRunner(['foo.py', '-a', '-b'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, ) self.cmd_executes( "run --branch foo.py", """\ cov = Coverage(branch=True) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, ) self.cmd_executes( "run --rcfile=myrc.rc foo.py", """\ cov = Coverage(config_file="myrc.rc") runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, ) self.cmd_executes( "run --include=pre1,pre2 foo.py", """\ cov = Coverage(include=["pre1", "pre2"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, ) self.cmd_executes( "run --omit=opre1,opre2 foo.py", """\ cov = Coverage(omit=["opre1", "opre2"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, ) self.cmd_executes( "run --include=pre1,pre2 --omit=opre1,opre2 foo.py", """\ cov = Coverage(include=["pre1", "pre2"], omit=["opre1", "opre2"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, ) self.cmd_executes( "run --source=quux,hi.there,/home/bar foo.py", """\ cov = Coverage(source=["quux", "hi.there", "/home/bar"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, ) self.cmd_executes( "run --concurrency=gevent foo.py", """\ cov = Coverage(concurrency=['gevent']) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, ) self.cmd_executes( "run --concurrency=multiprocessing foo.py", """\ cov = Coverage(concurrency=['multiprocessing']) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() 
cov.stop() cov.save() """, ) self.cmd_executes( "run --concurrency=gevent,thread foo.py", """\ cov = Coverage(concurrency=['gevent', 'thread']) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, ) self.cmd_executes( "run --data-file=output.coverage foo.py", """\ cov = Coverage(data_file="output.coverage") runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, ) def test_multiprocessing_needs_config_file(self) -> None: # You can't use command-line args to add options to multiprocessing # runs, since they won't make it to the subprocesses. You need to use a # config file. self.command_line("run --concurrency=multiprocessing --branch foo.py", ret=ERR) msg = "Options affecting multiprocessing must only be specified in a configuration file." _, err = self.stdouterr() assert msg in err assert "Remove --branch from the command line." in err def test_run_debug(self) -> None: self.cmd_executes( "run --debug=opt1 foo.py", """\ cov = Coverage(debug=["opt1"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, ) self.cmd_executes( "run --debug=opt1,opt2 foo.py", """\ cov = Coverage(debug=["opt1","opt2"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, ) def test_run_module(self) -> None: self.cmd_executes( "run -m mymodule", """\ cov = Coverage() runner = PyRunner(['mymodule'], as_module=True) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, ) self.cmd_executes( "run -m mymodule -qq arg1 arg2", """\ cov = Coverage() runner = PyRunner(['mymodule', '-qq', 'arg1', 'arg2'], as_module=True) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, ) self.cmd_executes( "run --branch -m mymodule", """\ cov = Coverage(branch=True) runner = PyRunner(['mymodule'], as_module=True) runner.prepare() 
cov.start() runner.run() cov.stop() cov.save() """, ) self.cmd_executes_same("run -m mymodule", "run --module mymodule") def test_run_nothing(self) -> None: self.command_line("run", ret=ERR) assert "Nothing to do" in self.stderr() def test_run_from_config(self) -> None: options = {"run:command_line": "myprog.py a 123 'a quoted thing' xyz"} self.cmd_executes( "run", """\ cov = Coverage() runner = PyRunner(['myprog.py', 'a', '123', 'a quoted thing', 'xyz'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, options=options, ) def test_run_module_from_config(self) -> None: self.cmd_executes( "run", """\ cov = Coverage() runner = PyRunner(['mymodule', 'thing1', 'thing2'], as_module=True) runner.prepare() cov.start() runner.run() cov.stop() cov.save() """, options={"run:command_line": "-m mymodule thing1 thing2"}, ) def test_run_from_config_but_empty(self) -> None: self.cmd_executes( "run", """\ cov = Coverage() show_help('Nothing to do.') """, ret=ERR, options={"run:command_line": ""}, ) def test_run_dashm_only(self) -> None: self.cmd_executes( "run -m", """\ cov = Coverage() show_help('No module specified for -m') """, ret=ERR, ) self.cmd_executes( "run -m", """\ cov = Coverage() show_help('No module specified for -m') """, ret=ERR, options={"run:command_line": "myprog.py"}, ) def test_cant_append_parallel(self) -> None: self.command_line("run --append --parallel-mode foo.py", ret=ERR) assert "Can't append to data files in parallel mode." in self.stderr() def test_xml(self) -> None: # coverage xml [-i] [--omit DIR,...] [FILE1 FILE2 ...] 
self.cmd_executes( "xml", """\ cov = Coverage() cov.load() cov.xml_report() """, ) self.cmd_executes( "xml -i", """\ cov = Coverage() cov.load() cov.xml_report(ignore_errors=True) """, ) self.cmd_executes( "xml -o myxml.foo", """\ cov = Coverage() cov.load() cov.xml_report(outfile="myxml.foo") """, ) self.cmd_executes( "xml -o -", """\ cov = Coverage() cov.load() cov.xml_report(outfile="-") """, ) self.cmd_executes( "xml --omit fooey", """\ cov = Coverage(omit=["fooey"]) cov.load() cov.xml_report(omit=["fooey"]) """, ) self.cmd_executes( "xml --omit fooey,booey", """\ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.xml_report(omit=["fooey", "booey"]) """, ) self.cmd_executes( "xml mod1", """\ cov = Coverage() cov.load() cov.xml_report(morfs=["mod1"]) """, ) self.cmd_executes( "xml mod1 mod2 mod3", """\ cov = Coverage() cov.load() cov.xml_report(morfs=["mod1", "mod2", "mod3"]) """, ) self.cmd_executes( "xml -q", """\ cov = Coverage(messages=False) cov.load() cov.xml_report() """, ) self.cmd_executes( "xml --quiet", """\ cov = Coverage(messages=False) cov.load() cov.xml_report() """, ) def test_no_arguments_at_all(self) -> None: self.cmd_help("", topic="minimum_help", ret=OK) def test_bad_command(self) -> None: self.cmd_help("xyzzy", "Unknown command: 'xyzzy'") def test_save_signal_wrong(self) -> None: self.cmd_help( "run --save-signal=XYZ nothing.py", "option --save-signal: invalid choice: 'XYZ' (choose from 'USR1', 'USR2')", ) @pytest.mark.skipif(not env.WINDOWS, reason="this is a windows-only error") def test_save_signal_windows(self) -> None: self.cmd_help( "run --save-signal=USR1 nothing.py", "--save-signal is not supported on Windows.", )
CmdLineTest
python
huggingface__transformers
src/transformers/models/rag/retrieval_rag.py
{ "start": 13605, "end": 15242 }
class ____(HFIndexBase): """ A wrapper around an instance of [`~datasets.Datasets`]. The dataset and the index are both loaded from the indicated paths on disk. Args: vector_size (`int`): the dimension of the passages embeddings used by the index dataset_path (`str`): The path to the serialized dataset on disk. The dataset should have 3 columns: title (str), text (str) and embeddings (arrays of dimension vector_size) index_path (`str`) The path to the serialized faiss index on disk. """ def __init__(self, vector_size: int, dataset, index_path=None): requires_backends(self, ["faiss"]) super().__init__(vector_size, dataset, index_initialized=index_path is None) self.index_path = index_path @classmethod def load_from_disk(cls, vector_size, dataset_path, index_path): logger.info(f"Loading passages from {dataset_path}") if dataset_path is None or index_path is None: raise ValueError( "Please provide `dataset_path` and `index_path` after calling `dataset.save_to_disk(dataset_path)` " "and `dataset.get_index('embeddings').save(index_path)`." ) dataset = load_from_disk(dataset_path) return cls(vector_size=vector_size, dataset=dataset, index_path=index_path) def init_index(self): if not self.is_initialized(): logger.info(f"Loading index from {self.index_path}") self.dataset.load_faiss_index("embeddings", file=self.index_path) self._index_initialized = True
CustomHFIndex
python
huggingface__transformers
tests/models/poolformer/test_image_processing_poolformer.py
{ "start": 1018, "end": 3023 }
class ____: def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize_and_center_crop=True, size=None, crop_pct=0.9, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): size = size if size is not None else {"shortest_edge": 30} crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize_and_center_crop = do_resize_and_center_crop self.size = size self.crop_pct = crop_pct self.crop_size = crop_size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } def expected_output_image_shape(self, images): return self.num_channels, self.crop_size["height"], self.crop_size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision
PoolFormerImageProcessingTester
python
facebookresearch__faiss
tests/test_contrib.py
{ "start": 726, "end": 1790 }
class ____(unittest.TestCase): def do_test_compute_GT(self, metric=faiss.METRIC_L2, ngpu=0): d = 64 xt, xb, xq = get_dataset_2(d, 0, 10000, 100) index = faiss.IndexFlat(d, metric) index.add(xb) Dref, Iref = index.search(xq, 10) # iterator function on the matrix def matrix_iterator(xb, bs): for i0 in range(0, xb.shape[0], bs): yield xb[i0:i0 + bs] Dnew, Inew = knn_ground_truth( xq, matrix_iterator(xb, 1000), 10, metric, ngpu=ngpu) np.testing.assert_array_equal(Iref, Inew) # decimal = 4 required when run on GPU np.testing.assert_almost_equal(Dref, Dnew, decimal=4) def test_compute_GT(self): self.do_test_compute_GT() def test_compute_GT_ip(self): self.do_test_compute_GT(faiss.METRIC_INNER_PRODUCT) def test_compute_GT_gpu(self): self.do_test_compute_GT(ngpu=-1) def test_compute_GT_ip_gpu(self): self.do_test_compute_GT(faiss.METRIC_INNER_PRODUCT, ngpu=-1)
TestComputeGT
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/sqltypes.py
{ "start": 131750, "end": 135309 }
class ____(Uuid[_UUID_RETURN], type_api.NativeForEmulated): """Represent the SQL UUID type. This is the SQL-native form of the :class:`_types.Uuid` database agnostic datatype, and is backwards compatible with the previous PostgreSQL-only version of ``UUID``. The :class:`_sqltypes.UUID` datatype only works on databases that have a SQL datatype named ``UUID``. It will not function for backends which don't have this exact-named type, including SQL Server. For backend-agnostic UUID values with native support, including for SQL Server's ``UNIQUEIDENTIFIER`` datatype, use the :class:`_sqltypes.Uuid` datatype. .. versionadded:: 2.0 .. seealso:: :class:`_sqltypes.Uuid` """ __visit_name__ = "UUID" @overload def __init__(self: UUID[_python_UUID], as_uuid: Literal[True] = ...): ... @overload def __init__(self: UUID[str], as_uuid: Literal[False] = ...): ... def __init__(self, as_uuid: bool = True): """Construct a :class:`_sqltypes.UUID` type. :param as_uuid=True: if True, values will be interpreted as Python uuid objects, converting to/from string via the DBAPI. .. versionchanged:: 2.0 ``as_uuid`` now defaults to ``True``. 
""" self.as_uuid = as_uuid self.native_uuid = True @classmethod def adapt_emulated_to_native(cls, impl, **kw): kw.setdefault("as_uuid", impl.as_uuid) return cls(**kw) NULLTYPE = NullType() BOOLEANTYPE = Boolean() STRINGTYPE = String() INTEGERTYPE = Integer() NUMERICTYPE: Numeric[decimal.Decimal] = Numeric() MATCHTYPE = MatchType() TABLEVALUE = TableValueType() DATETIME_TIMEZONE = DateTime(timezone=True) TIME_TIMEZONE = Time(timezone=True) _BIGINTEGER = BigInteger() _DATETIME = DateTime() _TIME = Time() _STRING = String() _UNICODE = Unicode() _type_map: Dict[Type[Any], TypeEngine[Any]] = { int: Integer(), float: Float(), bool: BOOLEANTYPE, _python_UUID: Uuid(), decimal.Decimal: Numeric(), dt.date: Date(), dt.datetime: _DATETIME, dt.time: _TIME, dt.timedelta: Interval(), type(None): NULLTYPE, bytes: LargeBinary(), str: _STRING, enum.Enum: Enum(enum.Enum), Literal: Enum(enum.Enum), # type: ignore[dict-item] } _type_map_get = _type_map.get def _resolve_value_to_type(value: Any) -> TypeEngine[Any]: _result_type = _type_map_get(type(value), False) if _result_type is False: _result_type = getattr(value, "__sa_type_engine__", False) if _result_type is False: # use inspect() to detect SQLAlchemy built-in # objects. insp = inspection.inspect(value, False) if ( insp is not None and # foil mock.Mock() and other impostors by ensuring # the inspection target itself self-inspects insp.__class__ in inspection._registrars ): raise exc.ArgumentError( "Object %r is not legal as a SQL literal value" % (value,) ) return NULLTYPE else: return _result_type._resolve_for_literal( # type: ignore [union-attr] value ) # back-assign to type_api type_api.BOOLEANTYPE = BOOLEANTYPE type_api.STRINGTYPE = STRINGTYPE type_api.INTEGERTYPE = INTEGERTYPE type_api.NULLTYPE = NULLTYPE type_api.NUMERICTYPE = NUMERICTYPE type_api.MATCHTYPE = MATCHTYPE type_api.INDEXABLE = INDEXABLE = Indexable type_api.TABLEVALUE = TABLEVALUE type_api._resolve_value_to_type = _resolve_value_to_type
UUID
python
GoogleCloudPlatform__python-docs-samples
speech/microphone/transcribe_streaming_infinite_v2_test.py
{ "start": 735, "end": 2663 }
class ____: def __init__(self: object, audio_filename: str) -> None: self.audio_filename = audio_filename def __call__(self: object, *args: object) -> object: return self def open( self: object, stream_callback: object, rate: int, *args: object, **kwargs: object ) -> object: self.rate = rate self.closed = threading.Event() self.stream_thread = threading.Thread( target=self.stream_audio, args=(self.audio_filename, stream_callback, self.closed), ) self.stream_thread.start() return self def close(self: object) -> None: self.closed.set() def stop_stream(self: object) -> None: pass def terminate(self: object) -> None: pass def stream_audio( self: object, audio_filename: str, callback: object, closed: object, num_frames: int = 512, ) -> None: with open(audio_filename, "rb") as audio_file: while not closed.is_set(): # Approximate realtime by sleeping for the appropriate time for # the requested number of frames time.sleep(num_frames / float(self.rate)) # audio is 16-bit samples, whereas python byte is 8-bit num_bytes = 2 * num_frames chunk = audio_file.read(num_bytes) or b"\0" * num_bytes callback(chunk, None, None, None) @mock.patch.dict( "sys.modules", pyaudio=mock.MagicMock(PyAudio=MockPyAudio(os.path.join(RESOURCES, "quit.raw"))), ) def test_main(capsys: pytest.CaptureFixture) -> None: import transcribe_streaming_infinite_v2 GCP_PROJECT_ID = "python-docs-samples-tests" transcribe_streaming_infinite_v2.main(GCP_PROJECT_ID) out, err = capsys.readouterr() assert re.search(r"quit", out, re.DOTALL | re.I)
MockPyAudio
python
apache__airflow
shared/logging/src/airflow_shared/logging/percent_formatter.py
{ "start": 1203, "end": 3050 }
class ____(collections.abc.Mapping): __slots__ = ("event", "styles", "level_styles", "method_name", "no_colors") def __init__( self, event: EventDict, method_name: str, level_styles: dict[str, str], styles: ColumnStyles ): self.event = event self.method_name = method_name self.level_styles = level_styles self.styles = styles self.no_colors = self.styles.reset == "" def __getitem__(self, key): # Roughly compatible with names from https://github.com/python/cpython/blob/v3.13.7/Lib/logging/__init__.py#L571 # Plus with ColoredLog added in if key in PercentFormatRender.callsite_parameters: return self.event.get(PercentFormatRender.callsite_parameters[key].value) if key == "name": return self.event.get("logger") or self.event.get("logger_name") if key == "levelname": return self.event.get("level", self.method_name).upper() if key == "asctime" or key == "created": return ( self.event.get("timestamp", None) or datetime.datetime.now(tz=datetime.timezone.utc).isoformat() ) if key == "message": return self.event["event"] if key in ("red", "green", "yellow", "blue", "purple", "cyan"): if self.no_colors: return "" return getattr(structlog.dev, key.upper(), "") if key == "reset": return self.styles.reset if key == "log_color": if self.no_colors: return "" return self.level_styles.get(self.event.get("level", self.method_name), "") return self.event[key] def __iter__(self): return self.event.__iter__() def __len__(self): return len(self.event)
_LazyLogRecordDict
python
networkx__networkx
networkx/algorithms/tests/test_link_prediction.py
{ "start": 471, "end": 1883 }
class ____: @classmethod def setup_class(cls): cls.func = staticmethod(nx.resource_allocation_index) cls.test = staticmethod(partial(_test_func, predict_func=cls.func)) def test_K5(self): G = nx.complete_graph(5) self.test(G, [(0, 1)], [(0, 1, 0.75)]) def test_P3(self): G = nx.path_graph(3) self.test(G, [(0, 2)], [(0, 2, 0.5)]) def test_S4(self): G = nx.star_graph(4) self.test(G, [(1, 2)], [(1, 2, 0.25)]) @pytest.mark.parametrize("graph_type", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) def test_notimplemented(self, graph_type): G = graph_type([(0, 1), (1, 2)]) with pytest.raises(nx.NetworkXNotImplemented): self.func(G, [(0, 2)]) def test_node_not_found(self): G = nx.Graph() G.add_edges_from([(0, 1), (0, 2), (2, 3)]) with pytest.raises(nx.NodeNotFound): self.func(G, [(0, 4)]) def test_no_common_neighbor(self): G = nx.Graph() G.add_nodes_from([0, 1]) self.test(G, [(0, 1)], [(0, 1, 0)]) def test_equal_nodes(self): G = nx.complete_graph(4) self.test(G, [(0, 0)], [(0, 0, 1)]) def test_all_nonexistent_edges(self): G = nx.Graph() G.add_edges_from([(0, 1), (0, 2), (2, 3)]) self.test(G, None, [(0, 3, 0.5), (1, 2, 0.5), (1, 3, 0)])
TestResourceAllocationIndex
python
langchain-ai__langchain
libs/langchain/tests/unit_tests/evaluation/agents/test_eval_chain.py
{ "start": 998, "end": 5961 }
class ____(FakeChatModel): queries: dict = Field(default_factory=dict) sequential_responses: bool | None = False response_index: int = 0 @override def _call( self, messages: list[BaseMessage], stop: list[str] | None = None, run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> str: if self.sequential_responses: response = self.queries[list(self.queries.keys())[self.response_index]] self.response_index = self.response_index + 1 return response prompt = messages[0].content return self.queries[prompt] def test_trajectory_output_parser_parse() -> None: trajectory_output_parser = TrajectoryOutputParser() text = """Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2. Score: 2""" got = trajectory_output_parser.parse(text) want = TrajectoryEval( score=0.25, reasoning="""Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2.""", ) assert got["score"] == want["score"] assert got["reasoning"] == want["reasoning"] with pytest.raises(OutputParserException): trajectory_output_parser.parse( """Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2.""", ) with pytest.raises(OutputParserException): trajectory_output_parser.parse( """Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2. Score: 9""", ) with pytest.raises(OutputParserException): trajectory_output_parser.parse( """Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2. Score: 10""", ) with pytest.raises(OutputParserException): trajectory_output_parser.parse( """Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2. 
Score: 0.1""", ) with pytest.raises(OutputParserException): trajectory_output_parser.parse( """Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2. Score: One""", ) def test_trajectory_eval_chain( intermediate_steps: list[tuple[AgentAction, str]], ) -> None: llm = _FakeTrajectoryChatModel( queries={ "a": "Trajectory good\nScore: 5", "b": "Trajectory not good\nScore: 1", }, sequential_responses=True, ) chain = TrajectoryEvalChain.from_llm(llm=llm, agent_tools=[foo]) # Test when ref is not provided res = chain.evaluate_agent_trajectory( input="What is your favorite food?", agent_trajectory=intermediate_steps, prediction="I like pie.", ) assert res["score"] == 1.0 # Test when ref is provided res = chain.evaluate_agent_trajectory( input="What is your favorite food?", agent_trajectory=intermediate_steps, prediction="I like pie.", reference="Paris", ) assert res["score"] == 0.0 def test_trajectory_eval_chain_no_tools( intermediate_steps: list[tuple[AgentAction, str]], ) -> None: llm = _FakeTrajectoryChatModel( queries={ "a": "Trajectory good\nScore: 5", "b": "Trajectory not good\nScore: 1", }, sequential_responses=True, ) chain = TrajectoryEvalChain.from_llm(llm=llm) res = chain.evaluate_agent_trajectory( input="What is your favorite food?", agent_trajectory=intermediate_steps, prediction="I like pie.", ) assert res["score"] == 1.0 res = chain.evaluate_agent_trajectory( input="What is your favorite food?", agent_trajectory=intermediate_steps, prediction="I like pie.", reference="Paris", ) assert res["score"] == 0.0 def test_old_api_works(intermediate_steps: list[tuple[AgentAction, str]]) -> None: llm = _FakeTrajectoryChatModel( queries={ "a": "Trajectory good\nScore: 5", "b": "Trajectory not good\nScore: 1", }, sequential_responses=True, ) chain = TrajectoryEvalChain.from_llm(llm=llm) res = chain( { "question": "What is your favorite food?", "agent_trajectory": intermediate_steps, "answer": "I like pie.", 
}, ) assert res["score"] == 1.0 res = chain( { "question": "What is your favorite food?", "agent_trajectory": intermediate_steps, "answer": "I like pie.", "reference": "Paris", }, ) assert res["score"] == 0.0
_FakeTrajectoryChatModel
python
sphinx-doc__sphinx
sphinx/pycode/__init__.py
{ "start": 399, "end": 6156 }
class ____: annotations: dict[tuple[str, str], str] attr_docs: dict[tuple[str, str], list[str]] finals: list[str] overloads: dict[str, list[Signature]] tagorder: dict[str, int] tags: dict[str, tuple[str, int, int]] # cache for analyzer objects -- caches both by module and file name cache: dict[tuple[Literal['file', 'module'], str | _StrPath], Any] = {} @staticmethod def get_module_source(modname: str) -> tuple[_StrPath | None, str | None]: """Try to find the source code for a module. Returns ('filename', 'source'). One of it can be None if no filename or source found """ try: mod = import_module(modname) except Exception as err: raise PycodeError('error importing %r' % modname, err) from err loader = getattr(mod, '__loader__', None) filename: str | None = getattr(mod, '__file__', None) if loader and getattr(loader, 'get_source', None): # prefer Native loader, as it respects #coding directive try: source = loader.get_source(modname) if source: mod_path = None if filename is None else _StrPath(filename) # no exception and not None - it must be module source return mod_path, source except ImportError: pass # Try other "source-mining" methods if filename is None and loader and getattr(loader, 'get_filename', None): # have loader, but no filename try: filename = loader.get_filename(modname) except ImportError as err: raise PycodeError( 'error getting filename for %r' % modname, err ) from err if filename is None: # all methods for getting filename failed, so raise... 
raise PycodeError('no source found for module %r' % modname) mod_path = _StrPath(filename).resolve() if mod_path.suffix in {'.pyo', '.pyc'}: mod_path_pyw = mod_path.with_suffix('.pyw') if not mod_path.is_file() and mod_path_pyw.is_file(): mod_path = mod_path_pyw else: mod_path = mod_path.with_suffix('.py') elif mod_path.suffix not in {'.py', '.pyw'}: msg = f'source is not a .py file: {mod_path!r}' raise PycodeError(msg) if not mod_path.is_file(): msg = f'source file is not present: {mod_path!r}' raise PycodeError(msg) return mod_path, None @classmethod def for_string( cls: type[ModuleAnalyzer], string: str, modname: str, srcname: str | os.PathLike[str] = '<string>', ) -> ModuleAnalyzer: return cls(string, modname, srcname) @classmethod def for_file( cls: type[ModuleAnalyzer], filename: str | os.PathLike[str], modname: str ) -> ModuleAnalyzer: filename = _StrPath(filename) if ('file', filename) in cls.cache: return cls.cache['file', filename] try: with tokenize.open(filename) as f: string = f.read() obj = cls(string, modname, filename) cls.cache['file', filename] = obj except Exception as err: raise PycodeError('error opening %r' % filename, err) from err return obj @classmethod def for_module(cls: type[ModuleAnalyzer], modname: str) -> ModuleAnalyzer: if ('module', modname) in cls.cache: entry = cls.cache['module', modname] if isinstance(entry, PycodeError): raise entry return entry try: filename, source = cls.get_module_source(modname) if source is not None: obj = cls.for_string(source, modname, filename or '<string>') elif filename is not None: obj = cls.for_file(filename, modname) except PycodeError as err: cls.cache['module', modname] = err raise cls.cache['module', modname] = obj return obj def __init__( self, source: str, modname: str, srcname: str | os.PathLike[str] ) -> None: self.modname = modname # name of the module self.srcname = str(srcname) # name of the source file # cache the source code as well self.code = source self._analyzed = False def 
analyze(self) -> None: """Analyze the source code.""" if self._analyzed: return try: parser = Parser(self.code) parser.parse() self.attr_docs = {} for scope, comment in parser.comments.items(): if comment: self.attr_docs[scope] = [*comment.splitlines(), ''] else: self.attr_docs[scope] = [''] self.annotations = parser.annotations self.finals = parser.finals self.overloads = parser.overloads self.tags = parser.definitions self.tagorder = parser.deforders self._analyzed = True except Exception as exc: msg = f'parsing {self.srcname!r} failed: {exc!r}' raise PycodeError(msg) from exc def find_attr_docs(self) -> dict[tuple[str, str], list[str]]: """Find class and module-level attributes and their documentation.""" self.analyze() return self.attr_docs def find_tags(self) -> dict[str, tuple[str, int, int]]: """Find class, function and method definitions and their location.""" self.analyze() return self.tags
ModuleAnalyzer
python
google__jax
tests/pallas/pallas_jumble_test.py
{ "start": 10803, "end": 10935 }
class ____(PallasCallRaggedVmapTest): INTERPRET = True if __name__ == "__main__": absltest.main()
PallasCallNamedGridInterpretTest
python
dask__dask
dask/dataframe/dask_expr/_expr.py
{ "start": 114723, "end": 114883 }
class ____(MaybeAlignPartitions): _parameters = ["frame", "other", "op", "na_action", "meta"] _projection_passthrough = False _expr_cls = Map
MapAlign
python
HypothesisWorks__hypothesis
hypothesis-python/src/hypothesis/internal/conjecture/dfa/lstar.py
{ "start": 17295, "end": 19320 }
class ____: """A class for replacing non-negative integers with a "canonical" value that is equivalent for all relevant purposes.""" def __init__(self): # We store canonical values as a sorted list of integers # with each value being treated as equivalent to the largest # integer in the list that is below it. self.__values = IntList([0]) self.__cache = {} def __repr__(self): return f"IntegerNormalizer({list(self.__values)!r})" def __copy__(self): result = IntegerNormalizer() result.__values = IntList(self.__values) return result def representatives(self): yield from self.__values def normalize(self, value): """Return the canonical integer considered equivalent to ``value``.""" try: return self.__cache[value] except KeyError: pass i = bisect_right(self.__values, value) - 1 assert i >= 0 return self.__cache.setdefault(value, self.__values[i]) def distinguish(self, value, test): """Checks whether ``test`` gives the same answer for ``value`` and ``self.normalize(value)``. If it does not, updates the list of canonical values so that it does. Returns True if and only if this makes a change to the underlying canonical values.""" canonical = self.normalize(value) if canonical == value: return False value_test = test(value) if test(canonical) == value_test: return False self.__cache.clear() def can_lower(k): new_canon = value - k if new_canon <= canonical: return False return test(new_canon) == value_test new_canon = value - find_integer(can_lower) assert new_canon not in self.__values insort(self.__values, new_canon) assert self.normalize(value) == new_canon return True
IntegerNormalizer
python
python-markdown__markdown
markdown/inlinepatterns.py
{ "start": 14504, "end": 15345 }
class ____(Pattern): # pragma: no cover """ Return element of type `tag` with a text attribute of `group(3)` of a Pattern. """ def __init__(self, pattern: str, tag: str): """ Create an instant of an simple tag pattern. Arguments: pattern: A regular expression that matches a pattern. tag: Tag of element. """ Pattern.__init__(self, pattern) self.tag = tag """ The tag of the rendered element. """ def handleMatch(self, m: re.Match[str]) -> etree.Element: """ Return [`Element`][xml.etree.ElementTree.Element] of type `tag` with the string in `group(3)` of a matching pattern as the Element's text. """ el = etree.Element(self.tag) el.text = m.group(3) return el
SimpleTagPattern
python
ray-project__ray
python/ray/train/v2/tests/test_torch_gpu.py
{ "start": 4887, "end": 7584 }
class ____(LinearDataset): """Modifies the LinearDataset to also return non-tensor objects.""" def __getitem__(self, index): return {"x": self.x[index, None], "y": 2} @pytest.mark.parametrize( "dataset", (LinearDataset, LinearDatasetDict, NonTensorDataset) ) def test_torch_prepare_dataloader(ray_start_4_cpus_2_gpus, dataset): data_loader = DataLoader(dataset(a=1, b=2, size=10)) def train_fn(): wrapped_data_loader = ray.train.torch.prepare_data_loader(data_loader) # Check that DistributedSampler has been added to the data loader. assert isinstance(wrapped_data_loader.sampler, DistributedSampler) # Make sure you can properly iterate through the DataLoader. # Case where the dataset returns a tuple or list from __getitem__. if isinstance(dataset, LinearDataset): for batch in wrapped_data_loader: x = batch[0] y = batch[1] # Make sure the data is on the correct device. assert x.is_cuda and y.is_cuda # Case where the dataset returns a dict from __getitem__. elif isinstance(dataset, LinearDatasetDict): for batch in wrapped_data_loader: for x, y in zip(batch["x"], batch["y"]): # Make sure the data is on the correct device. assert x.is_cuda and y.is_cuda elif isinstance(dataset, NonTensorDataset): for batch in wrapped_data_loader: for x, y in zip(batch["x"], batch["y"]): # Make sure the data is on the correct device. assert x.is_cuda and y == 2 trainer = TorchTrainer( train_fn, scaling_config=ScalingConfig(num_workers=2, use_gpu=True) ) trainer.fit() def test_torch_fail_on_nccl_timeout(ray_start_4_cpus_2_gpus): """Tests that TorchTrainer raises exception on NCCL timeouts.""" def train_fn(): model = torch.nn.Linear(1, 1) model = ray.train.torch.prepare_model(model) # Rank 0 worker will never reach the collective operation. # NCCL should timeout. 
if ray.train.get_context().get_world_rank() == 0: while True: time.sleep(100) torch.distributed.barrier() trainer = TorchTrainer( train_fn, scaling_config=ScalingConfig(num_workers=2, use_gpu=True), torch_config=ray.train.torch.TorchConfig(timeout_s=2), ) # Training should fail and not hang. with pytest.raises(WorkerGroupError): trainer.fit() if __name__ == "__main__": import sys sys.exit(pytest.main(["-v", "-x", __file__]))
NonTensorDataset
python
scipy__scipy
scipy/cluster/tests/test_hierarchy.py
{ "start": 26575, "end": 27905 }
class ____: def test_leaves_list_1x4(self, xp): # Tests leaves_list(Z) on a 1x4 linkage. Z = xp.asarray([[0, 1, 3.0, 2]], dtype=xp.float64) to_tree(Z) assert_allclose(leaves_list(Z), [0, 1], rtol=1e-15) def test_leaves_list_2x4(self, xp): # Tests leaves_list(Z) on a 2x4 linkage. Z = xp.asarray([[0, 1, 3.0, 2], [3, 2, 4.0, 3]], dtype=xp.float64) to_tree(Z) assert_allclose(leaves_list(Z), [0, 1, 2], rtol=1e-15) @pytest.mark.parametrize("method", ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']) def test_leaves_list_Q(self, method, xp): # Tests leaves_list(Z) on the Q data set X = hierarchy_test_data.Q_X Z = xp.asarray(linkage(X, method)) node = to_tree(Z) assert_allclose(node.pre_order(), leaves_list(Z), rtol=1e-15) def test_Q_subtree_pre_order(self, xp): # Tests that pre_order() works when called on sub-trees. X = hierarchy_test_data.Q_X Z = xp.asarray(linkage(X, 'single')) node = to_tree(Z) assert_allclose(node.pre_order(), (node.get_left().pre_order() + node.get_right().pre_order()), rtol=1e-15) @make_xp_test_case(correspond)
TestLeavesList
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 157767, "end": 158972 }
class ____(sgqlc.types.Input): """Autogenerated input type of CloneProject""" __schema__ = github_schema __field_names__ = ("target_owner_id", "source_id", "include_workflows", "name", "body", "public", "client_mutation_id") target_owner_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="targetOwnerId") """The owner ID to create the project under.""" source_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="sourceId") """The source project to clone.""" include_workflows = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="includeWorkflows") """Whether or not to clone the source project's workflows.""" name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name") """The name of the project.""" body = sgqlc.types.Field(String, graphql_name="body") """The description of the project.""" public = sgqlc.types.Field(Boolean, graphql_name="public") """The visibility of the project, defaults to false (private).""" client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation."""
CloneProjectInput
python
run-llama__llama_index
llama-index-integrations/llms/llama-index-llms-cloudflare-ai-gateway/llama_index/llms/cloudflare_ai_gateway/base.py
{ "start": 2157, "end": 3868 }
class ____: """Wrapper for HTTP clients that intercepts requests and routes through AI Gateway.""" def __init__(self, gateway_instance, original_client, llm_instance): self.gateway = gateway_instance self.original_client = original_client self.llm = llm_instance self.provider_config = get_provider_config(llm_instance) if not self.provider_config: raise CloudflareAIGatewayError( f"Unsupported provider for LLM: {type(self.llm).__name__}" ) def __getattr__(self, name): """Delegate attribute access to the original client.""" return getattr(self.original_client, name) def post(self, url: str, **kwargs): """Intercept POST requests and route through AI Gateway.""" # Transform request for AI Gateway transformed_request = self._transform_request(url, kwargs) # Make request to AI Gateway return self.gateway._make_ai_gateway_request(transformed_request) def _transform_request(self, url: str, kwargs: Dict[str, Any]) -> Dict[str, Any]: """Transform request for AI Gateway format.""" # Extract headers and body headers = kwargs.get("headers", {}) json_data = kwargs.get("json", {}) # Get endpoint from URL endpoint = self.provider_config.transform_endpoint(url) # Pass the original request body directly to AI Gateway # AI Gateway handles provider-specific format differences internally return { "provider": self.provider_config.name, "endpoint": endpoint, "headers": headers, "query": json_data, }
AIGatewayClientWrapper
python
charliermarsh__ruff
crates/ruff_python_formatter/resources/test/fixtures/black/cases/form_feeds.py
{ "start": 693, "end": 995 }
class ____: def __init__(self): pass def something(self): pass # pass pass # a = 1 # pass a = 1 a = [ ] # as internal whitespace of a comment is allowed but why "form feed literal in a string is okay " # form feeds at the very end get removed.
Baz
python
run-llama__llama_index
llama-index-integrations/embeddings/llama-index-embeddings-huggingface-optimum/llama_index/embeddings/huggingface_optimum/base.py
{ "start": 497, "end": 6778 }
class ____(BaseEmbedding): folder_name: str = Field(description="Folder name to load from.") max_length: int = Field(description="Maximum length of input.") pooling: str = Field(description="Pooling strategy. One of ['cls', 'mean'].") normalize: bool = Field(default=True, description="Normalize embeddings or not.") query_instruction: Optional[str] = Field( description="Instruction to prepend to query text." ) text_instruction: Optional[str] = Field( description="Instruction to prepend to text." ) cache_folder: Optional[str] = Field( description="Cache folder for huggingface files.", default=None ) _model: Any = PrivateAttr() _tokenizer: Any = PrivateAttr() _device: Any = PrivateAttr() def __init__( self, folder_name: str, pooling: str = "cls", max_length: Optional[int] = None, normalize: bool = True, query_instruction: Optional[str] = None, text_instruction: Optional[str] = None, model: Optional[Any] = None, tokenizer: Optional[Any] = None, embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE, callback_manager: Optional[CallbackManager] = None, device: Optional[str] = None, ): model = model or ORTModelForFeatureExtraction.from_pretrained(folder_name) tokenizer = tokenizer or AutoTokenizer.from_pretrained(folder_name) device = device or infer_torch_device() if max_length is None: try: max_length = int(model.config.max_position_embeddings) except Exception: raise ValueError( "Unable to find max_length from model config. " "Please provide max_length." 
) try: max_length = min(max_length, int(tokenizer.model_max_length)) except Exception as exc: print(f"An error occurred while retrieving tokenizer max length: {exc}") if pooling not in ["cls", "mean"]: raise ValueError(f"Pooling {pooling} not supported.") super().__init__( embed_batch_size=embed_batch_size, callback_manager=callback_manager, folder_name=folder_name, max_length=max_length, pooling=pooling, normalize=normalize, query_instruction=query_instruction, text_instruction=text_instruction, ) self._model = model self._device = device self._tokenizer = tokenizer @classmethod def class_name(cls) -> str: return "OptimumEmbedding" @classmethod def create_and_save_optimum_model( cls, model_name_or_path: str, output_path: str, export_kwargs: Optional[dict] = None, ) -> None: try: from optimum.onnxruntime import ORTModelForFeatureExtraction from transformers import AutoTokenizer except ImportError: raise ImportError( "OptimumEmbedding requires transformers to be installed.\n" "Please install transformers with " "`pip install transformers optimum[exporters]`." ) export_kwargs = export_kwargs or {} model = ORTModelForFeatureExtraction.from_pretrained( model_name_or_path, export=True, **export_kwargs ) tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) model.save_pretrained(output_path) tokenizer.save_pretrained(output_path) print( f"Saved optimum model to {output_path}. Use it with " f"`embed_model = OptimumEmbedding(folder_name='{output_path}')`." 
) def _mean_pooling(self, model_output: Any, attention_mask: Any) -> Any: """Mean Pooling - Take attention mask into account for correct averaging.""" import torch # First element of model_output contains all token embeddings token_embeddings = model_output[0] input_mask_expanded = ( attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() ).to(token_embeddings.device) return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp( input_mask_expanded.sum(1), min=1e-9 ) def _cls_pooling(self, model_output: list) -> Any: """Use the CLS token as the pooling token.""" return model_output[0][:, 0] def _embed(self, sentences: List[str]) -> List[List[float]]: """Embed sentences.""" encoded_input = self._tokenizer( sentences, padding=True, max_length=self.max_length, truncation=True, return_tensors="pt", ) model_output = self._model(**encoded_input) if self.pooling == "cls": embeddings = self._cls_pooling(model_output) else: embeddings = self._mean_pooling( model_output, encoded_input["attention_mask"].to(self._device) ) if self.normalize: import torch embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1) return embeddings.tolist() def _get_query_embedding(self, query: str) -> List[float]: """Get query embedding.""" query = format_query(query, self.model_name, self.query_instruction) return self._embed([query])[0] async def _aget_query_embedding(self, query: str) -> List[float]: """Get query embedding async.""" return self._get_query_embedding(query) async def _aget_text_embedding(self, text: str) -> List[float]: """Get text embedding async.""" return self._get_text_embedding(text) def _get_text_embedding(self, text: str) -> List[float]: """Get text embedding.""" text = format_text(text, self.model_name, self.text_instruction) return self._embed([text])[0] def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]: """Get text embeddings.""" texts = [ format_text(text, self.model_name, self.text_instruction) for text in texts 
] return self._embed(texts)
OptimumEmbedding
python
ray-project__ray
python/ray/serve/tests/test_api.py
{ "start": 17794, "end": 34467 }
class ____: @serve.deployment class A: pass @serve.deployment def f(): pass class TypedArgs(BaseModel): message: str num_replicas: Optional[int] def test_prebuilt_app(self): a = self.A.bind() assert call_user_app_builder_with_args_if_necessary(a, {}) == a f = self.f.bind() assert call_user_app_builder_with_args_if_necessary(f, {}) == f with pytest.raises( ValueError, match="Arguments can only be passed to an application builder function", ): call_user_app_builder_with_args_if_necessary(f, {"key": "val"}) def test_invalid_builder(self): class ThisShouldBeAFunction: pass with pytest.raises( TypeError, match=( "Expected a built Serve application " "or an application builder function" ), ): call_user_app_builder_with_args_if_necessary(ThisShouldBeAFunction, {}) def test_invalid_signature(self): def builder_with_two_args(args1, args2): return self.f.bind() with pytest.raises( TypeError, match="Application builder functions should take exactly one parameter", ): call_user_app_builder_with_args_if_necessary(builder_with_two_args, {}) def test_builder_returns_bad_type(self): def return_none(args): self.f.bind() with pytest.raises( TypeError, match="Application builder functions must return a", ): call_user_app_builder_with_args_if_necessary(return_none, {}) def return_unbound_deployment(args): return self.f with pytest.raises( TypeError, match="Application builder functions must return a", ): call_user_app_builder_with_args_if_necessary(return_unbound_deployment, {}) def test_basic_no_args(self): def build_function(args): return self.A.bind() assert isinstance( call_user_app_builder_with_args_if_necessary(build_function, {}), Application, ) def build_class(args): return self.f.bind() assert isinstance( call_user_app_builder_with_args_if_necessary(build_class, {}), Application ) def test_args_dict(self): args_dict = {"message": "hiya", "num_replicas": "3"} def build(args): assert len(args) == 2 assert args["message"] == "hiya" assert args["num_replicas"] == "3" return 
self.A.options(num_replicas=int(args["num_replicas"])).bind( args["message"] ) app = call_user_app_builder_with_args_if_necessary(build, args_dict) assert isinstance(app, Application) def test_args_typed(self): args_dict = {"message": "hiya", "num_replicas": "3"} def build(args): """Builder with no type hint.""" return self.A.options(num_replicas=args["num_replicas"]).bind( args["message"] ) app = call_user_app_builder_with_args_if_necessary(build, args_dict) assert isinstance(app, Application) def build(args: Dict[str, str]): """Builder with vanilla type hint.""" return self.A.options(num_replicas=args["num_replicas"]).bind( args["message"] ) app = call_user_app_builder_with_args_if_necessary(build, args_dict) assert isinstance(app, Application) class ForwardRef: def build(args: "ForwardRef"): """Builder with forward reference as type hint.""" return self.A.options(num_replicas=args["num_replicas"]).bind( args["message"] ) app = call_user_app_builder_with_args_if_necessary(ForwardRef.build, args_dict) assert isinstance(app, Application) def build(args: self.TypedArgs): """Builder with Pydantic model type hint.""" assert isinstance(args, self.TypedArgs) assert args.message == "hiya" assert args.num_replicas == 3 return self.A.options(num_replicas=args.num_replicas).bind(args.message) app = call_user_app_builder_with_args_if_necessary(build, args_dict) assert isinstance(app, Application) # Sanity check that pydantic validation works. # 1) Check that validation permits a missing optional field. def check_missing_optional(args: self.TypedArgs): assert args.message == "hiya" assert args.num_replicas is None return self.A.bind() app = call_user_app_builder_with_args_if_necessary( check_missing_optional, {"message": "hiya"} ) assert isinstance(app, Application) # 2) Check that validation rejects a missing required field. def check_missing_required(args: self.TypedArgs): assert False, "Shouldn't get here because validation failed." 
with pytest.raises(ValidationError, match="field required"): call_user_app_builder_with_args_if_necessary( check_missing_required, {"num_replicas": "10"} ) @pytest.mark.parametrize("use_v1_patch", [True, False]) def test_pydantic_version_compatibility(self, use_v1_patch: bool): """Check compatibility with different pydantic versions.""" if use_v1_patch: try: # Only runs if installed pydantic version is >=2.5.0 from pydantic.v1 import BaseModel except ImportError: return else: from pydantic import BaseModel cat_dict = {"color": "orange", "age": 10} class Cat(BaseModel): color: str age: int def build(args: Cat): """Builder with Pydantic model type hint.""" assert isinstance(args, Cat), f"args type: {type(args)}" assert args.color == cat_dict["color"] assert args.age == cat_dict["age"] return self.A.bind(f"My {args.color} cat is {args.age} years old.") app = call_user_app_builder_with_args_if_necessary(build, cat_dict) assert isinstance(app, Application) def test_no_slash_route_prefix(serve_instance): """Test serve run with no slash route_prefix. This test ensure when serve runs with no prefix slash in route_prefix, it will throw good error message. """ @serve.deployment def f(): pass with pytest.raises( ValueError, match=( r"Invalid route_prefix 'no_slash', " r"must start with a forward slash \('/'\)" ), ): serve.run(f.bind(), route_prefix="no_slash") def test_mutually_exclusive_max_replicas_per_node_and_placement_group_bundles(): with pytest.raises( ValueError, match=( "Setting max_replicas_per_node is not allowed when " "placement_group_bundles is provided." ), ): @serve.deployment(max_replicas_per_node=3, placement_group_bundles=[{"CPU": 1}]) def f(): pass with pytest.raises( ValueError, match=( "Setting max_replicas_per_node is not allowed when " "placement_group_bundles is provided." 
), ): @serve.deployment def g(): pass g.options(max_replicas_per_node=3, placement_group_bundles=[{"CPU": 1}]) def test_status_basic(serve_instance): # Before Serve is started, serve.status() should have an empty list of applications assert len(serve.status().applications) == 0 @serve.deployment(ray_actor_options={"num_cpus": 0.1}) class A: def __call__(self, val: int): return val + 1 @serve.deployment(ray_actor_options={"num_cpus": 0.1}) def f(): return "hello world" @serve.deployment(ray_actor_options={"num_cpus": 0.1}) class MyDriver: def __init__(self, handle): self._h = handle async def __call__(self): return await self._h.remote() handle_1 = serve.run(A.bind(), name="plus", route_prefix="/a") handle_2 = serve.run(MyDriver.bind(f.bind()), name="hello", route_prefix="/b") assert handle_1.remote(8).result() == 9 assert handle_2.remote().result() == "hello world" app_status = serve.status().applications assert len(app_status) == 2 assert set(app_status["plus"].deployments.keys()) == {"A"} assert set(app_status["hello"].deployments.keys()) == {"MyDriver", "f"} for d in app_status["plus"].deployments.values(): assert d.status == "HEALTHY" and d.replica_states == {"RUNNING": 1} for d in app_status["plus"].deployments.values(): assert d.status == "HEALTHY" and d.replica_states == {"RUNNING": 1} proxy_status = serve.status().proxies assert all(p == "HEALTHY" for p in proxy_status.values()) def test_status_constructor_error(serve_instance): """Deploys Serve deployment that errors out in constructor, checks that the traceback is surfaced in serve.status(). 
""" @serve.deployment class A: def __init__(self): _ = 1 / 0 serve._run(A.bind(), _blocking=False) def check_for_failed_app(): default_app = serve.status().applications[SERVE_DEFAULT_APP_NAME] error_substr = "ZeroDivisionError: division by zero" assert ( default_app.status == "DEPLOY_FAILED" and error_substr in default_app.deployments["A"].message ) assert default_app.deployments["A"].status == "DEPLOY_FAILED" return True wait_for_condition(check_for_failed_app) # Instead of hanging forever, a request to the application should # return a 503 error to reflect the failed deployment state. # The timeout is there to prevent the test from hanging and blocking # the test suite if it does fail. r = httpx.post("http://localhost:8000/", timeout=10) assert r.status_code == 503 and "unavailable" in r.text.lower() @serve.deployment class A: def __init__(self): pass serve._run(A.bind(), _blocking=False) def check_for_running_app(): default_app = serve.status().applications[SERVE_DEFAULT_APP_NAME] assert default_app.status == "RUNNING" assert default_app.deployments["A"].status == "HEALTHY" return True wait_for_condition(check_for_running_app) @pytest.mark.skipif( sys.platform == "win32", reason="Runtime env support experimental on windows" ) def test_status_package_unavailable_in_controller(serve_instance): """Test that exceptions raised from packages that are installed on deployment actors but not on controller is serialized and surfaced properly in serve.status(). 
""" @serve.deployment class MyDeployment: def __init__(self): import pymysql from sqlalchemy import create_engine pymysql.install_as_MySQLdb() create_engine("mysql://some_wrong_url:3306").connect() ray_actor_options = {"runtime_env": {"pip": ["PyMySQL", "sqlalchemy==1.3.19"]}} serve._run( MyDeployment.options(ray_actor_options=ray_actor_options).bind(), _blocking=False, ) def check_for_failed_deployment(): default_app = serve.status().applications[SERVE_DEFAULT_APP_NAME] assert default_app.status == "DEPLOY_FAILED" assert "some_wrong_url" in default_app.deployments["MyDeployment"].message return True wait_for_condition(check_for_failed_deployment, timeout=60) def test_get_app_handle_basic(serve_instance): @serve.deployment(ray_actor_options={"num_cpus": 0.1}) class M: def __call__(self, val: int): return val + 1 @serve.deployment(ray_actor_options={"num_cpus": 0.1}) def f(): return "hello world" @serve.deployment(ray_actor_options={"num_cpus": 0.1}) class MyDriver: def __init__(self, handle): self._h = handle async def __call__(self): return await self._h.remote() serve.run(M.bind(), name="A", route_prefix="/a") serve.run(MyDriver.bind(f.bind()), name="B", route_prefix="/b") handle = serve.get_app_handle("A") assert handle.remote(8).result() == 9 handle = serve.get_app_handle("B") assert handle.remote().result() == "hello world" def test_get_app_handle_dne(serve_instance): """Test getting app handle to an app that doesn't exist.""" with pytest.raises(RayServeException) as e: serve.get_app_handle("random") assert "Application 'random' does not exist" in str(e.value) def test_get_app_handle_within_deployment_async(serve_instance): @serve.deployment() class a: def __init__(self, handle): self.handle = handle def __call__(self, val: int): return val + 2 @serve.deployment() class b: def __call__(self, val: int): return val @serve.deployment async def f(val): handle = serve.get_app_handle(SERVE_DEFAULT_APP_NAME) result = await handle.remote(val) return f"The answer is 
{result}" serve.run(a.bind(b.bind()), route_prefix="/math") serve.run(f.bind(), name="call") handle = serve.get_app_handle("call") assert handle.remote(7).result() == "The answer is 9" def test_get_deployment_handle_basic(serve_instance): @serve.deployment(ray_actor_options={"num_cpus": 0.1}) def f(): return "hello world" @serve.deployment(ray_actor_options={"num_cpus": 0.1}) class MyDriver: def __init__(self, handle): self._h = handle async def __call__(self): return f"{await self._h.remote()}!!" serve.run(MyDriver.bind(f.bind())) handle = serve.get_deployment_handle("f", SERVE_DEFAULT_APP_NAME) assert isinstance(handle, DeploymentHandle) assert handle.remote().result() == "hello world" app_handle = serve.get_app_handle(SERVE_DEFAULT_APP_NAME) assert isinstance(app_handle, DeploymentHandle) assert app_handle.remote().result() == "hello world!!" def test_deployment_handle_nested_in_obj(serve_instance): """Test binding a handle within a custom object.""" class HandleWrapper: def __init__(self, handle: DeploymentHandle): self._handle = handle def get(self) -> DeploymentHandle: return self._handle @serve.deployment def f() -> str: return "hi" @serve.deployment class MyDriver: def __init__(self, handle_wrapper: HandleWrapper): self.handle_wrapper = handle_wrapper async def __call__(self) -> str: return await self.handle_wrapper.get().remote() handle_wrapper = HandleWrapper(f.bind()) h = serve.run(MyDriver.bind(handle_wrapper)) assert h.remote().result() == "hi" def test_max_ongoing_requests_none(serve_instance): """We should not allow setting `max_ongoing_requests` to None.""" def get_max_ongoing_requests(): details = serve_instance.get_serve_details() return details["applications"]["default"]["deployments"]["A"][ "deployment_config" ]["max_ongoing_requests"] class A: pass with pytest.raises(ValueError): serve.deployment(max_ongoing_requests=None)(A).bind() with pytest.raises(ValueError): serve.deployment(A).options(max_ongoing_requests=None).bind() 
serve.run(serve.deployment(A).bind()) assert get_max_ongoing_requests() == DEFAULT_MAX_ONGOING_REQUESTS serve.run( serve.deployment(max_ongoing_requests=8, graceful_shutdown_timeout_s=2)( A ).bind() ) assert get_max_ongoing_requests() == 8 serve.run(serve.deployment(A).options(max_ongoing_requests=12).bind()) assert get_max_ongoing_requests() == 12 def test_deploy_app_with_custom_request_router(serve_instance): """Test deploying an app with a custom request router configured in the deployment decorator.""" handle = serve.run(AppWithCustomRequestRouter.bind()) assert handle.remote().result() == "Hello, world!" @serve.deployment( request_router_config=RequestRouterConfig( request_router_class="ray.serve.tests.test_api.FakeRequestRouter", request_router_kwargs=dict(test_parameter=4848), ) )
TestAppBuilder
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/triggers/dataproc.py
{ "start": 3350, "end": 10106 }
class ____(DataprocBaseTrigger): """ DataprocSubmitTrigger run on the trigger worker to perform create Build operation. :param job_id: The ID of a Dataproc job. :param project_id: Google Cloud Project where the job is running :param region: The Cloud Dataproc region in which to handle the request. :param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). :param polling_interval_seconds: polling period in seconds to check for the status """ def __init__(self, job_id: str, **kwargs): self.job_id = job_id super().__init__(**kwargs) def serialize(self): return ( "airflow.providers.google.cloud.triggers.dataproc.DataprocSubmitTrigger", { "job_id": self.job_id, "project_id": self.project_id, "region": self.region, "gcp_conn_id": self.gcp_conn_id, "impersonation_chain": self.impersonation_chain, "polling_interval_seconds": self.polling_interval_seconds, "cancel_on_kill": self.cancel_on_kill, }, ) if not AIRFLOW_V_3_0_PLUS: @provide_session def get_task_instance(self, session: Session) -> TaskInstance: """ Get the task instance for the current task. 
:param session: Sqlalchemy session """ query = session.query(TaskInstance).filter( TaskInstance.dag_id == self.task_instance.dag_id, TaskInstance.task_id == self.task_instance.task_id, TaskInstance.run_id == self.task_instance.run_id, TaskInstance.map_index == self.task_instance.map_index, ) task_instance = query.one_or_none() if task_instance is None: raise AirflowException( "TaskInstance with dag_id: %s,task_id: %s, run_id: %s and map_index: %s is not found", self.task_instance.dag_id, self.task_instance.task_id, self.task_instance.run_id, self.task_instance.map_index, ) return task_instance async def get_task_state(self): from airflow.sdk.execution_time.task_runner import RuntimeTaskInstance task_states_response = await sync_to_async(RuntimeTaskInstance.get_task_states)( dag_id=self.task_instance.dag_id, task_ids=[self.task_instance.task_id], run_ids=[self.task_instance.run_id], map_index=self.task_instance.map_index, ) try: task_state = task_states_response[self.task_instance.run_id][self.task_instance.task_id] except Exception: raise AirflowException( "TaskInstance with dag_id: %s, task_id: %s, run_id: %s and map_index: %s is not found", self.task_instance.dag_id, self.task_instance.task_id, self.task_instance.run_id, self.task_instance.map_index, ) return task_state async def safe_to_cancel(self) -> bool: """ Whether it is safe to cancel the external job which is being executed by this trigger. This is to avoid the case that `asyncio.CancelledError` is called because the trigger itself is stopped. Because in those cases, we should NOT cancel the external job. """ if AIRFLOW_V_3_0_PLUS: task_state = await self.get_task_state() else: # Database query is needed to get the latest state of the task instance. 
task_instance = self.get_task_instance() # type: ignore[call-arg] task_state = task_instance.state return task_state != TaskInstanceState.DEFERRED async def run(self): try: while True: job = await self.get_async_hook().get_job( project_id=self.project_id, region=self.region, job_id=self.job_id ) state = job.status.state self.log.info("Dataproc job: %s is in state: %s", self.job_id, state) if state in (JobStatus.State.DONE, JobStatus.State.CANCELLED, JobStatus.State.ERROR): break await asyncio.sleep(self.polling_interval_seconds) yield TriggerEvent( {"job_id": self.job_id, "job_state": JobStatus.State(state).name, "job": Job.to_dict(job)} ) except asyncio.CancelledError: self.log.info("Task got cancelled.") try: if self.job_id and self.cancel_on_kill and await self.safe_to_cancel(): self.log.info( "Cancelling the job as it is safe to do so. Note that the airflow TaskInstance is not" " in deferred state." ) self.log.info("Cancelling the job: %s", self.job_id) # The synchronous hook is utilized to delete the cluster when a task is cancelled. This # is because the asynchronous hook deletion is not awaited when the trigger task is # cancelled. The call for deleting the cluster or job through the sync hook is not a # blocking call, which means it does not wait until the cluster or job is deleted. self.get_sync_hook().cancel_job( job_id=self.job_id, project_id=self.project_id, region=self.region ) self.log.info("Job: %s is cancelled", self.job_id) yield TriggerEvent( { "job_id": self.job_id, "job_state": ClusterStatus.State.DELETING.name, } ) except Exception as e: self.log.error("Failed to cancel the job: %s with error : %s", self.job_id, str(e)) raise e
DataprocSubmitTrigger
python
wandb__wandb
wandb/vendor/pygments/lexers/verification.py
{ "start": 1880, "end": 3705 }
class ____(RegexLexer): """ For `Silver <https://bitbucket.org/viperproject/silver>`_ source code. .. versionadded:: 2.2 """ name = 'Silver' aliases = ['silver'] filenames = ['*.sil', '*.vpr'] tokens = { 'root': [ # Whitespace and Comments (r'\n', Whitespace), (r'\s+', Whitespace), (r'//[/!](.*?)\n', Comment.Doc), (r'//(.*?)\n', Comment.Single), (r'/\*', Comment.Multiline, 'comment'), (words(( 'result', 'true', 'false', 'null', 'method', 'function', 'predicate', 'program', 'domain', 'axiom', 'var', 'returns', 'field', 'define', 'requires', 'ensures', 'invariant', 'fold', 'unfold', 'inhale', 'exhale', 'new', 'assert', 'assume', 'goto', 'while', 'if', 'elseif', 'else', 'fresh', 'constraining', 'Seq', 'Set', 'Multiset', 'union', 'intersection', 'setminus', 'subset', 'unfolding', 'in', 'old', 'forall', 'exists', 'acc', 'wildcard', 'write', 'none', 'epsilon', 'perm', 'unique', 'apply', 'package', 'folding', 'label', 'forperm'), suffix=r'\b'), Keyword), (words(('Int', 'Perm', 'Bool', 'Ref'), suffix=r'\b'), Keyword.Type), include('numbers'), (r'[!%&*+=|?:<>/\-\[\]]', Operator), (r'([{}():;,.])', Punctuation), # Identifier (r'[\w$]\w*', Name), ], 'comment': [ (r'[^*/]+', Comment.Multiline), (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'[*/]', Comment.Multiline), ], 'numbers': [ (r'[0-9]+', Number.Integer), ], }
SilverLexer
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/mssql/base.py
{ "start": 41077, "end": 41360 }
class ____: def bind_processor(self, dialect): def process(value): if type(value) == datetime.date: return datetime.datetime(value.year, value.month, value.day) else: return value return process
_DateTimeBase
python
pandas-dev__pandas
pandas/tests/tseries/offsets/common.py
{ "start": 802, "end": 901 }
class ____: MON = 0 TUE = 1 WED = 2 THU = 3 FRI = 4 SAT = 5 SUN = 6
WeekDay
python
ansible__ansible
lib/ansible/modules/user.py
{ "start": 64745, "end": 70680 }
class ____(User): """ This is a OpenBSD User manipulation class. Main differences are that OpenBSD:- - has no concept of "system" account. - has no force delete user This overrides the following methods from the generic class:- - create_user() - remove_user() - modify_user() """ platform = 'OpenBSD' distribution = None SHADOWFILE = '/etc/master.passwd' def create_user(self): cmd = [self.module.get_bin_path('useradd', True)] if self.uid is not None: cmd.append('-u') cmd.append(self.uid) if self.non_unique: cmd.append('-o') if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) cmd.append('-g') cmd.append(self.group) if self.groups is not None: groups = self.get_groups_set() cmd.append('-G') cmd.append(','.join(groups)) if self.comment is not None: cmd.append('-c') cmd.append(self.comment) if self.home is not None: cmd.append('-d') cmd.append(self.home) if self.shell is not None: cmd.append('-s') cmd.append(self.shell) if self.login_class is not None: cmd.append('-L') cmd.append(self.login_class) if self.password is not None and self.password != '*': cmd.append('-p') cmd.append(self.password) if self.create_home: cmd.append('-m') if self.skeleton is not None: cmd.append('-k') cmd.append(self.skeleton) if self.umask is not None: cmd.append('-K') cmd.append('UMASK=' + self.umask) if self.inactive is not None: cmd.append('-f') cmd.append(self.inactive) if self.uid_min is not None: cmd.append('-K') cmd.append('UID_MIN=' + str(self.uid_min)) if self.uid_max is not None: cmd.append('-K') cmd.append('UID_MAX=' + str(self.uid_max)) cmd.append(self.name) return self.execute_command(cmd) def remove_user_userdel(self): cmd = [self.module.get_bin_path('userdel', True)] if self.remove: cmd.append('-r') cmd.append(self.name) return self.execute_command(cmd) def modify_user(self): cmd = [self.module.get_bin_path('usermod', True)] info = self.user_info() if self.uid is not None and info[2] != int(self.uid): 
cmd.append('-u') cmd.append(self.uid) if self.non_unique: cmd.append('-o') if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) ginfo = self.group_info(self.group) if info[3] != ginfo[2]: cmd.append('-g') cmd.append(self.group) if self.groups is not None: current_groups = self.user_group_membership() groups_need_mod = False groups_option = '-S' groups = [] if self.groups == '': if current_groups and not self.append: groups_need_mod = True else: groups = self.get_groups_set(names_only=True) group_diff = set(current_groups).symmetric_difference(groups) if group_diff: if self.append: for g in groups: if g in group_diff: groups_option = '-G' groups_need_mod = True break else: groups_need_mod = True if groups_need_mod: cmd.append(groups_option) cmd.append(','.join(groups)) if self.comment is not None and info[4] != self.comment: cmd.append('-c') cmd.append(self.comment) if self.home is not None and info[5] != self.home: if self.move_home: cmd.append('-m') cmd.append('-d') cmd.append(self.home) if self.shell is not None and info[6] != self.shell: cmd.append('-s') cmd.append(self.shell) if self.inactive is not None: cmd.append('-f') cmd.append(self.inactive) if self.login_class is not None: # find current login class user_login_class = None userinfo_cmd = [self.module.get_bin_path('userinfo', True), self.name] (rc, out, err) = self.execute_command(userinfo_cmd, obey_checkmode=False) for line in out.splitlines(): tokens = line.split() if tokens[0] == 'class' and len(tokens) == 2: user_login_class = tokens[1] # act only if login_class change if self.login_class != user_login_class: cmd.append('-L') cmd.append(self.login_class) if self.password_lock and not info[1].startswith('*'): cmd.append('-Z') elif self.password_lock is False and info[1].startswith('*'): cmd.append('-U') if self.update_password == 'always' and self.password is not None \ and self.password != '*' and info[1] != self.password: 
cmd.append('-p') cmd.append(self.password) # skip if no changes to be made if len(cmd) == 1: return (None, '', '') cmd.append(self.name) return self.execute_command(cmd)
OpenBSDUser
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/refurb/FURB118.py
{ "start": 2574, "end": 2918 }
class ____: a = final(lambda self, other: self == other) b = override(lambda self, other: self == other) c = no_type_check(lambda self, other: self == other) d = final(override(no_type_check(lambda self, other: self == other))) # lambdas used in decorators do not constitute method definitions, # so these *should* be flagged:
Foo
python
django__django
tests/model_meta/models.py
{ "start": 1733, "end": 2751 }
class ____(AbstractPerson): # DATA fields data_base = models.CharField(max_length=10) fk_base = models.ForeignKey(Relation, models.CASCADE, related_name="fk_base_rel") # M2M fields m2m_base = models.ManyToManyField(Relation, related_name="m2m_base_rel") friends_base = models.ManyToManyField("self", symmetrical=True) following_base = models.ManyToManyField( "self", related_name="followers_base", symmetrical=False ) # VIRTUAL fields data_not_concrete_base = models.ForeignObject( Relation, on_delete=models.CASCADE, from_fields=["base_non_concrete_id"], to_fields=["id"], related_name="fo_base_rel", ) # GFK fields content_type_base = models.ForeignKey(ContentType, models.CASCADE, related_name="+") object_id_base = models.PositiveIntegerField() content_object_base = GenericForeignKey("content_type_base", "object_id_base") # GR fields generic_relation_base = GenericRelation(Relation)
BasePerson
python
spack__spack
lib/spack/spack/vendor/jinja2/nodes.py
{ "start": 12133, "end": 12462 }
class ____(Stmt): """Specific node for with statements. In older versions of Jinja the with statement was implemented on the base of the `Scope` node instead. .. versionadded:: 2.9.3 """ fields = ("targets", "values", "body") targets: t.List["Expr"] values: t.List["Expr"] body: t.List[Node]
With
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocol17.py
{ "start": 1107, "end": 1361 }
class ____(Protocol[_T1_co]): # This should generate an error because a covariant TypeVar # should not be used as a parameter type. def m1(self, p0: _T1_co) -> None: ... # This should generate an error because _T1 should be covariant.
Protocol5
python
bottlepy__bottle
bottle.py
{ "start": 157131, "end": 158191 }
class ____(BaseTemplate): def prepare(self, filters=None, tests=None, globals={}, **kwargs): from jinja2 import Environment, FunctionLoader self.env = Environment(loader=FunctionLoader(self.loader), **kwargs) if filters: self.env.filters.update(filters) if tests: self.env.tests.update(tests) if globals: self.env.globals.update(globals) if self.source: self.tpl = self.env.from_string(self.source) else: self.tpl = self.env.get_template(self.name) def render(self, *args, **kwargs): for dictarg in args: kwargs.update(dictarg) _defaults = self.defaults.copy() _defaults.update(kwargs) return self.tpl.render(**_defaults) def loader(self, name): if name == self.filename: fname = name else: fname = self.search(name, self.lookup) if not fname: return with open(fname, "rb") as f: return (f.read().decode(self.encoding), fname, lambda: False)
Jinja2Template
python
tornadoweb__tornado
tornado/test/httputil_test.py
{ "start": 16967, "end": 18568 }
class ____(unittest.TestCase): # Make sure that all the input types are supported. TIMESTAMP = 1359312200.503611 EXPECTED = "Sun, 27 Jan 2013 18:43:20 GMT" def check(self, value): self.assertEqual(format_timestamp(value), self.EXPECTED) def test_unix_time_float(self): self.check(self.TIMESTAMP) def test_unix_time_int(self): self.check(int(self.TIMESTAMP)) def test_struct_time(self): self.check(time.gmtime(self.TIMESTAMP)) def test_time_tuple(self): tup = tuple(time.gmtime(self.TIMESTAMP)) self.assertEqual(9, len(tup)) self.check(tup) def test_utc_naive_datetime(self): self.check( datetime.datetime.fromtimestamp( self.TIMESTAMP, datetime.timezone.utc ).replace(tzinfo=None) ) def test_utc_naive_datetime_deprecated(self): with ignore_deprecation(): self.check(datetime.datetime.utcfromtimestamp(self.TIMESTAMP)) def test_utc_aware_datetime(self): self.check( datetime.datetime.fromtimestamp(self.TIMESTAMP, datetime.timezone.utc) ) def test_other_aware_datetime(self): # Other timezones are ignored; the timezone is always printed as GMT self.check( datetime.datetime.fromtimestamp( self.TIMESTAMP, datetime.timezone(datetime.timedelta(hours=-4)) ) ) # HTTPServerRequest is mainly tested incidentally to the server itself, # but this tests the parts of the class that can be tested in isolation.
FormatTimestampTest
python
scrapy__scrapy
tests/test_downloader_handlers_http_base.py
{ "start": 28699, "end": 31704 }
class ____(ABC): is_secure = False expected_http_proxy_request_body = b"http://example.com" @property @abstractmethod def download_handler_cls(self) -> type[DownloadHandlerProtocol]: raise NotImplementedError @pytest.fixture(scope="session") def proxy_mockserver(self) -> Generator[ProxyEchoMockServer]: with ProxyEchoMockServer() as proxy: yield proxy @async_yield_fixture async def download_handler(self) -> AsyncGenerator[DownloadHandlerProtocol]: dh = build_from_crawler(self.download_handler_cls, get_crawler()) yield dh await close_dh(dh) @deferred_f_from_coro_f async def test_download_with_proxy( self, proxy_mockserver: ProxyEchoMockServer, download_handler: DownloadHandlerProtocol, ) -> None: http_proxy = proxy_mockserver.url("", is_secure=self.is_secure) request = Request("http://example.com", meta={"proxy": http_proxy}) response = await download_request(download_handler, request) assert response.status == 200 assert response.url == request.url assert response.body == self.expected_http_proxy_request_body @deferred_f_from_coro_f async def test_download_without_proxy( self, proxy_mockserver: ProxyEchoMockServer, download_handler: DownloadHandlerProtocol, ) -> None: request = Request( proxy_mockserver.url("/path/to/resource", is_secure=self.is_secure) ) response = await download_request(download_handler, request) assert response.status == 200 assert response.url == request.url assert response.body == b"/path/to/resource" @deferred_f_from_coro_f async def test_download_with_proxy_https_timeout( self, proxy_mockserver: ProxyEchoMockServer, download_handler: DownloadHandlerProtocol, ) -> None: if NON_EXISTING_RESOLVABLE: pytest.skip("Non-existing hosts are resolvable") http_proxy = proxy_mockserver.url("", is_secure=self.is_secure) domain = "https://no-such-domain.nosuch" request = Request(domain, meta={"proxy": http_proxy, "download_timeout": 0.2}) with pytest.raises(error.TimeoutError) as exc_info: await download_request(download_handler, request) assert domain in 
exc_info.value.osError @deferred_f_from_coro_f async def test_download_with_proxy_without_http_scheme( self, proxy_mockserver: ProxyEchoMockServer, download_handler: DownloadHandlerProtocol, ) -> None: http_proxy = f"{proxy_mockserver.host}:{proxy_mockserver.port()}" request = Request("http://example.com", meta={"proxy": http_proxy}) response = await download_request(download_handler, request) assert response.status == 200 assert response.url == request.url assert response.body == self.expected_http_proxy_request_body
TestHttpProxyBase
python
PyCQA__pylint
tests/functional/i/invalid/invalid_enum_extension.py
{ "start": 798, "end": 920 }
class ____(Enum): red: None = None def __init__(self, red: None) -> None: self.red = red
IncorrectColorEnum
python
pytorch__pytorch
test/distributed/test_device_mesh.py
{ "start": 2013, "end": 2601 }
class ____(DTensorTestBase): @property def backend(self): return "gloo" @with_comms def test_device_mesh_reuse_default_group(self): mesh = init_device_mesh(self.device_type, (self.world_size,)) mesh_group = mesh.get_group() default_group = _get_default_group() if torch.cuda.is_available(): self.assertNotEqual(mesh_group, default_group) self.assertEqual(get_world_size(mesh_group), get_world_size(default_group)) else: self.assertEqual(mesh_group, default_group)
DeviceMeshTestGlooBackend
python
pytorch__pytorch
torch/_inductor/ir.py
{ "start": 188286, "end": 191287 }
class ____(OperationBuffer): inputs: Sequence[Union[IRNode, Sequence[IRNode]]] def input_name(self, i: int) -> str: input = self.inputs[i] assert isinstance(input, IRNode) return input.get_name() def get_read_writes(self) -> dependencies.ReadWrites: reads = OrderedSet[dependencies.Dep]() StarDep = dependencies.StarDep for input in self.inputs: if isinstance(input, Sequence): reads.update(StarDep(x.get_name()) for x in input) elif isinstance(input, ShapeAsConstantBuffer): # Skip creating dependency for symbolics as they're visible globally continue else: reads.add(StarDep(input.get_name())) writes = OrderedSet[dependencies.Dep]( StarDep(buf.get_name()) for buf in self.get_outputs() ) return dependencies.ReadWrites( reads=reads, writes=writes, index_exprs=OrderedSet(), ) def get_reads(self) -> OrderedSet[Dep]: return self.get_read_writes().reads @classmethod def unwrap_storage_for_input(cls, x: IRNode) -> IRNode: if isinstance(x, TensorBox): x = x.data if isinstance(x, StorageBox): x = x.data if isinstance(x, BaseView) and not isinstance(x, ReinterpretView): x = ExternKernel.realize_input(x) if isinstance(x, TensorBox): # when converting to ReinterpretView fails in the # realize_input call above, the result will be wrapped # into TensorBox / StorageBox pair as a result of the # cls.copy_input call; so we should unwrap recursively return cls.unwrap_storage_for_input(x) if isinstance(x, TorchBindObject): return x assert isinstance(x, (Buffer, ReinterpretView)), type(x) return x @staticmethod def unwrap_storage( inputs: Sequence[Union[IRNode, Sequence[IRNode]]], ) -> list[Union[IRNode, Sequence[IRNode]]]: inputs_new: list[Union[IRNode, Sequence[IRNode]]] = [] for x in inputs: if isinstance(x, Sequence): x = [InputsKernel.unwrap_storage_for_input(i) for i in x] else: x = InputsKernel.unwrap_storage_for_input(x) inputs_new.append(x) return inputs_new def is_extern(self) -> bool: return True def num_reads(self) -> int: return 1 @cache_on_self_and_args("InputsKernel") def 
get_free_symbol_uses( self, unbacked_only: bool = False ) -> OrderedSet[sympy.Symbol]: r = OrderedSet[sympy.Symbol]() for inp in self.inputs: if isinstance(inp, IRNode): r |= inp.get_free_symbol_uses(unbacked_only) else: for inner_inp in inp: r |= inner_inp.get_free_symbol_uses(unbacked_only) return r
InputsKernel
python
dagster-io__dagster
python_modules/libraries/dagster-census/dagster_census/translator.py
{ "start": 516, "end": 1302 }
class ____: """A record representing all content in a Census workspace. Provided as context for the translator so that it can resolve dependencies between content. """ syncs: list[CensusSync] @property def syncs_by_id(self) -> Mapping[int, CensusSync]: """Returns a mapping of sync IDs to CensusSync objects.""" return {sync.id: sync for sync in self.syncs} def generate_table_schema(sync_mappings_props: list[dict[str, Any]]) -> dg.TableSchema: return dg.TableSchema( columns=sorted( [ dg.TableColumn(name=mapping["to"], type=mapping.get("field_type", "unknown")) for mapping in sync_mappings_props ], key=lambda col: col.name, ) )
CensusWorkspaceData
python
marshmallow-code__apispec
tests/test_core.py
{ "start": 24569, "end": 44278 }
class ____(RefsSchemaTestMixin): paths = { "/pet/{petId}": { "get": { "parameters": [ { "required": True, "format": "int64", "name": "petId", "in": "path", "type": "integer", "description": "ID of pet that needs to be fetched", } ], "responses": { "200": {"description": "successful operation"}, "400": {"description": "Invalid ID supplied"}, "404": {"description": "Pet not found"}, }, "produces": ["application/json", "application/xml"], "operationId": "getPetById", "summary": "Find pet by ID", "description": ( "Returns a pet when ID < 10. " "ID > 10 or nonintegers will simulate API error conditions" ), "tags": ["pet"], } } } def test_path(self, spec): route_spec = self.paths["/pet/{petId}"]["get"] spec.path( path="/pet/{petId}", operations=dict( get=dict( parameters=route_spec["parameters"], responses=route_spec["responses"], produces=route_spec["produces"], operationId=route_spec["operationId"], summary=route_spec["summary"], description=route_spec["description"], tags=route_spec["tags"], ) ), ) p = get_paths(spec)["/pet/{petId}"]["get"] assert p["parameters"] == route_spec["parameters"] assert p["responses"] == route_spec["responses"] assert p["operationId"] == route_spec["operationId"] assert p["summary"] == route_spec["summary"] assert p["description"] == route_spec["description"] assert p["tags"] == route_spec["tags"] def test_paths_maintain_order(self, spec): spec.path(path="/path1") spec.path(path="/path2") spec.path(path="/path3") spec.path(path="/path4") assert list(spec.to_dict()["paths"].keys()) == [ "/path1", "/path2", "/path3", "/path4", ] def test_path_is_chainable(self, spec): spec.path(path="/path1").path("/path2") assert list(spec.to_dict()["paths"].keys()) == ["/path1", "/path2"] def test_path_methods_maintain_order(self, spec): methods = ["get", "post", "put", "patch", "delete", "head", "options"] for method in methods: spec.path(path="/path", operations={method: {}}) assert list(spec.to_dict()["paths"]["/path"]) == methods def 
test_path_merges_paths(self, spec): """Test that adding a second HTTP method to an existing path performs a merge operation instead of an overwrite""" path = "/pet/{petId}" route_spec = self.paths[path]["get"] spec.path(path=path, operations=dict(get=route_spec)) spec.path( path=path, operations=dict( put=dict( parameters=route_spec["parameters"], responses=route_spec["responses"], produces=route_spec["produces"], operationId="updatePet", summary="Updates an existing Pet", description="Use this method to make changes to Pet `petId`", tags=route_spec["tags"], ) ), ) p = get_paths(spec)[path] assert "get" in p assert "put" in p @pytest.mark.parametrize("openapi_version", ("2.0", "3.0.0")) def test_path_called_twice_with_same_operations_parameters(self, openapi_version): """Test calling path twice with same operations or parameters operations and parameters being mutated by clean_operations and plugin helpers should not make path fail on second call """ class TestPlugin(BasePlugin): def path_helper(self, path, operations, parameters, **kwargs): """Mutate operations and parameters""" operations.update({"post": {"responses": {"201": "201ResponseRef"}}}) parameters.append("ParamRef_3") return path spec = APISpec( title="Swagger Petstore", version="1.0.0", openapi_version=openapi_version, plugins=[TestPlugin()], ) path = "/pet/{petId}" parameters = ["ParamRef_1"] operation = { "parameters": ["ParamRef_2"], "responses": {"200": "200ResponseRef"}, } spec.path(path=path, operations={"get": operation}, parameters=parameters) spec.path(path=path, operations={"put": operation}, parameters=parameters) operations = (get_paths(spec))[path] assert ( operations["get"] == operations["put"] == { "parameters": [build_ref(spec, "parameter", "ParamRef_2")], "responses": {"200": build_ref(spec, "response", "200ResponseRef")}, } ) assert operations["parameters"] == [ build_ref(spec, "parameter", "ParamRef_1"), build_ref(spec, "parameter", "ParamRef_3"), ] def 
test_path_ensures_path_parameters_required(self, spec): path = "/pet/{petId}" spec.path( path=path, operations=dict(put=dict(parameters=[{"name": "petId", "in": "path"}])), ) assert get_paths(spec)[path]["put"]["parameters"][0]["required"] is True def test_path_with_no_path_raises_error(self, spec): message = "Path template is not specified" with pytest.raises(APISpecError, match=message): spec.path() def test_path_summary_description(self, spec): summary = "Operations on a Pet" description = "Operations on a Pet identified by its ID" spec.path(path="/pet/{petId}", summary=summary, description=description) p = get_paths(spec)["/pet/{petId}"] assert p["summary"] == summary assert p["description"] == description def test_path_resolves_parameter(self, spec): route_spec = self.paths["/pet/{petId}"]["get"] spec.components.parameter("test_parameter", "path", route_spec["parameters"][0]) spec.path( path="/pet/{petId}", operations={"get": {"parameters": ["test_parameter"]}} ) p = get_paths(spec)["/pet/{petId}"]["get"] assert p["parameters"][0] == build_ref(spec, "parameter", "test_parameter") @pytest.mark.parametrize( "parameters", ([{"name": "petId"}], [{"in": "path"}]), # missing "in" # missing "name" ) def test_path_invalid_parameter(self, spec, parameters): path = "/pet/{petId}" with pytest.raises(InvalidParameterError): spec.path(path=path, operations=dict(put={}, get={}), parameters=parameters) def test_parameter_duplicate(self, spec): spec.path( path="/pet/{petId}", operations={ "get": { "parameters": [ {"name": "petId", "in": "path"}, {"name": "petId", "in": "query"}, ] } }, ) with pytest.raises(DuplicateParameterError): spec.path( path="/pet/{petId}", operations={ "get": { "parameters": [ {"name": "petId", "in": "path"}, {"name": "petId", "in": "path"}, ] } }, ) def test_global_parameters(self, spec): path = "/pet/{petId}" route_spec = self.paths["/pet/{petId}"]["get"] spec.components.parameter("test_parameter", "path", route_spec["parameters"][0]) spec.path( 
path=path, operations=dict(put={}, get={}), parameters=[{"name": "petId", "in": "path"}, "test_parameter"], ) assert get_paths(spec)[path]["parameters"] == [ {"name": "petId", "in": "path", "required": True}, build_ref(spec, "parameter", "test_parameter"), ] def test_global_parameter_duplicate(self, spec): path = "/pet/{petId}" spec.path( path=path, operations=dict(put={}, get={}), parameters=[ {"name": "petId", "in": "path"}, {"name": "petId", "in": "query"}, ], ) assert get_paths(spec)[path]["parameters"] == [ {"name": "petId", "in": "path", "required": True}, {"name": "petId", "in": "query"}, ] with pytest.raises(DuplicateParameterError): spec.path( path=path, operations=dict(put={}, get={}), parameters=[ {"name": "petId", "in": "path"}, {"name": "petId", "in": "path"}, "test_parameter", ], ) def test_path_resolves_response(self, spec): route_spec = self.paths["/pet/{petId}"]["get"] spec.components.response("test_response", route_spec["responses"]["200"]) spec.path( path="/pet/{petId}", operations={"get": {"responses": {"200": "test_response"}}}, ) p = get_paths(spec)["/pet/{petId}"]["get"] assert p["responses"]["200"] == build_ref(spec, "response", "test_response") def test_path_response_with_HTTPStatus_code(self, spec): code = HTTPStatus(200) spec.path( path="/pet/{petId}", operations={"get": {"responses": {code: "test_response"}}}, ) assert "200" in get_paths(spec)["/pet/{petId}"]["get"]["responses"] def test_path_response_with_status_code_range(self, spec, recwarn): status_code = "2XX" spec.path( path="/pet/{petId}", operations={"get": {"responses": {status_code: "test_response"}}}, ) if spec.openapi_version.major < 3: assert len(recwarn) == 1 assert recwarn.pop(UserWarning) assert status_code in get_paths(spec)["/pet/{petId}"]["get"]["responses"] def test_path_check_invalid_http_method(self, spec): spec.path("/pet/{petId}", operations={"get": {}}) spec.path("/pet/{petId}", operations={"x-dummy": {}}) message = "One or more HTTP methods are invalid" with 
pytest.raises(APISpecError, match=message): spec.path("/pet/{petId}", operations={"dummy": {}}) def test_path_resolve_response_schema(self, spec): schema = {"schema": "PetSchema"} if spec.openapi_version.major >= 3: schema = {"content": {"application/json": schema}} spec.path("/pet/{petId}", operations={"get": {"responses": {"200": schema}}}) resp = get_paths(spec)["/pet/{petId}"]["get"]["responses"]["200"] if spec.openapi_version.major < 3: schema = resp["schema"] else: schema = resp["content"]["application/json"]["schema"] assert schema == build_ref(spec, "schema", "PetSchema") # callbacks only exists in OAS 3 @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_path_resolve_callbacks(self, spec): parameter = {"name": "petId", "in": "query", "schema": "PetSchema"} spec.path( "/pet/{petId}", operations={ "get": { "callbacks": { "onEvent": { "/callback/{petId}": { "post": { "parameters": [parameter], "requestBody": { "content": { "application/json": {"schema": "PetSchema"} } }, "responses": { "200": { "content": { "application/json": { "schema": "PetSchema" } } } }, } } } }, } }, ) path = get_paths(spec)["/pet/{petId}"] schema_ref = build_ref(spec, "schema", "PetSchema") callback_op = path["get"]["callbacks"]["onEvent"]["/callback/{petId}"]["post"] assert callback_op["parameters"][0]["schema"] == schema_ref assert ( callback_op["requestBody"]["content"]["application/json"]["schema"] == schema_ref ) assert ( callback_op["responses"]["200"]["content"]["application/json"]["schema"] == schema_ref ) # requestBody only exists in OAS 3 @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_path_resolve_request_body(self, spec): spec.path( "/pet/{petId}", operations={ "get": { "requestBody": { "content": {"application/json": {"schema": "PetSchema"}} } } }, ) assert get_paths(spec)["/pet/{petId}"]["get"]["requestBody"]["content"][ "application/json" ]["schema"] == build_ref(spec, "schema", "PetSchema") # "headers" components section only exists 
in OAS 3 @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_path_resolve_response_header(self, spec): response = {"headers": {"header_1": "Header_1"}} spec.path("/pet/{petId}", operations={"get": {"responses": {"200": response}}}) resp = get_paths(spec)["/pet/{petId}"]["get"]["responses"]["200"] header_1 = resp["headers"]["header_1"] assert header_1 == build_ref(spec, "header", "Header_1") # "headers" components section only exists in OAS 3 @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_path_resolve_response_header_schema(self, spec): response = {"headers": {"header_1": {"name": "Pet", "schema": "PetSchema"}}} spec.path("/pet/{petId}", operations={"get": {"responses": {"200": response}}}) resp = get_paths(spec)["/pet/{petId}"]["get"]["responses"]["200"] header_1 = resp["headers"]["header_1"] assert header_1["schema"] == build_ref(spec, "schema", "PetSchema") # "headers" components section only exists in OAS 3 @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_path_resolve_response_header_examples(self, spec): response = { "headers": { "header_1": {"name": "Pet", "examples": {"example_1": "Example_1"}} } } spec.path("/pet/{petId}", operations={"get": {"responses": {"200": response}}}) resp = get_paths(spec)["/pet/{petId}"]["get"]["responses"]["200"] header_1 = resp["headers"]["header_1"] assert header_1["examples"]["example_1"] == build_ref( spec, "example", "Example_1" ) # "examples" components section only exists in OAS 3 @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_path_resolve_response_examples(self, spec): response = { "content": {"application/json": {"examples": {"example_1": "Example_1"}}} } spec.path("/pet/{petId}", operations={"get": {"responses": {"200": response}}}) resp = get_paths(spec)["/pet/{petId}"]["get"]["responses"]["200"] example_1 = resp["content"]["application/json"]["examples"]["example_1"] assert example_1 == build_ref(spec, "example", "Example_1") # "examples" 
components section only exists in OAS 3 @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_path_resolve_request_body_examples(self, spec): request_body = { "content": {"application/json": {"examples": {"example_1": "Example_1"}}} } spec.path("/pet/{petId}", operations={"get": {"requestBody": request_body}}) reqbdy = get_paths(spec)["/pet/{petId}"]["get"]["requestBody"] example_1 = reqbdy["content"]["application/json"]["examples"]["example_1"] assert example_1 == build_ref(spec, "example", "Example_1") # "examples" components section only exists in OAS 3 @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_path_resolve_parameter_examples(self, spec): parameter = { "name": "test", "in": "query", "examples": {"example_1": "Example_1"}, } spec.path("/pet/{petId}", operations={"get": {"parameters": [parameter]}}) param = get_paths(spec)["/pet/{petId}"]["get"]["parameters"][0] example_1 = param["examples"]["example_1"] assert example_1 == build_ref(spec, "example", "Example_1") def test_path_resolve_parameter_schemas(self, spec): parameter = {"name": "test", "in": "query", "schema": "PetSchema"} spec.path("/pet/{petId}", operations={"get": {"parameters": [parameter]}}) param = get_paths(spec)["/pet/{petId}"]["get"]["parameters"][0] assert param["schema"] == build_ref(spec, "schema", "PetSchema") def test_path_resolve_refs_in_response_schema(self, spec): if spec.openapi_version.major >= 3: schema = {"content": {"application/json": {"schema": self.REFS_SCHEMA}}} else: schema = {"schema": self.REFS_SCHEMA} spec.path("/pet/{petId}", operations={"get": {"responses": {"200": schema}}}) resp = get_paths(spec)["/pet/{petId}"]["get"]["responses"]["200"] if spec.openapi_version.major < 3: schema = resp["schema"] else: schema = resp["content"]["application/json"]["schema"] self.assert_schema_refs(spec, schema) def test_path_resolve_refs_in_parameter_schema(self, spec): schema = copy.copy({"schema": self.REFS_SCHEMA}) schema["in"] = "query" 
schema["name"] = "test" spec.path("/pet/{petId}", operations={"get": {"parameters": [schema]}}) schema = get_paths(spec)["/pet/{petId}"]["get"]["parameters"][0]["schema"] self.assert_schema_refs(spec, schema) # requestBody only exists in OAS 3 @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True) def test_path_resolve_refs_in_request_body_schema(self, spec): schema = {"content": {"application/json": {"schema": self.REFS_SCHEMA}}} spec.path("/pet/{petId}", operations={"get": {"responses": {"200": schema}}}) resp = get_paths(spec)["/pet/{petId}"]["get"]["responses"]["200"] schema = resp["content"]["application/json"]["schema"] self.assert_schema_refs(spec, schema)
TestPath
python
tensorflow__tensorflow
tensorflow/python/eager/wrap_function.py
{ "start": 9434, "end": 18612 }
class ____(function.ConcreteFunction): """Wraps a tf V1 piece of code in a function.""" def __init__( self, fn_graph, variable_holder, attrs=None, signature=None, are_keyword_args_also_positional=False, ): self._variable_holder = variable_holder _lift_unlifted_variables(fn_graph, variable_holder) # We call __init__ after lifting variables so that the function's signature # properly reflects the new captured inputs. for f in fn_graph.as_graph_def(use_pybind11_proto=True).library.function: context.context().add_function_def(f) self._signature = signature function_type = function_type_lib.from_structured_signature( fn_graph.structured_input_signature, fn_graph.structured_outputs, fn_graph.function_captures.capture_types, are_keyword_args_also_positional=are_keyword_args_also_positional, ) atomic_fn = atomic_function.from_func_graph( function._inference_name(fn_graph.name), fn_graph, attrs, function_type ) super().__init__(atomic_fn) def _call_impl(self, args, kwargs): if self._arg_keywords is None: if kwargs: raise NotImplementedError( "Keyword arguments are not supported when calling a " f"wrap_function-decorated function. Got {kwargs}.") if self._signature is not None: args = list(args) for i, arg in enumerate(args): if isinstance(self._signature[i], tensor_lib.DenseSpec): args[i] = ops.convert_to_tensor(arg, self._signature[i].dtype) return self._call_flat(args, self.captured_inputs) else: return super()._call_impl(args, kwargs) def prune( self, feeds, fetches, name=None, input_signature=None, are_keyword_args_also_positional=False, ): """Extract a subgraph of this function's underlying graph. Wraps the subgraph in a new `WrappedFunction` object. Args: feeds: Input tensors to the subgraph to extract, as `Tensor` objects. fetches: Possibly-nested Python data structure containing information about outputs of the target subgraph. Each entry can either be a `Tensor` object (for data outputs), an `Operation` object (for control outputs), or a `TensorInfo` proto. 
Any additional shape/dtype information provided in a `TensorInfo` and not present in the original graph will be added to the returned subgraph. name: (optional) Name to give to the underlying `FuncGraph` of the returned object. If no name is provided, the graph's name will be `"pruned"`. input_signature: (optional) possibly-nested Python data structure containing `TensorSpec` objects, with which to populate the returned functions's `FuncGraph`'s `structured_input_signature` field. are_keyword_args_also_positional: whether the keyword arguments in `input_signature` are `POSITIONAL_OR_KEYWORD` arguments. If `False` (default), they are treated as `KEYWORD_ONLY` arguments. Returns: A new `WrappedFunction` object containing a copy of the portion of this object's graph that goes from `feeds` to `fetches`. """ # TODO(b/129646028): Add support for CompositeTensors. name = name or "pruned" flat_feeds = nest.flatten(feeds, expand_composites=True) flat_feeds = [self.graph.as_graph_element(t) for t in flat_feeds] for f in flat_feeds: if not isinstance(f, tensor_lib.Tensor): raise ValueError( "All members of argument `feeds` must be tensors. " f"Got {f} with type {type(f)}." ) # Ignoring all feeds that are captures allows prune to be called # using wrapped_func.inputs even when it uses variables internal_captures = {id(c) for c in self.graph.internal_captures} flat_feeds = [f for f in flat_feeds if id(f) not in internal_captures] operation_fetches = [] tensor_fetches = [] tensor_infos = [] def _fetch_preprocessing_callback(fetch): """Extract out lists of ops, tensors, and tensor type info. Turns TensorInfos into Tensors in the original `fetches` structure. Also extracts ops from `fetches`. Args: fetch: The fetch to preprocess: Tensor, TensorInfo, or Operation, or string identifying a Tensor or Operation. Returns: `fetch` converted to a Tensor. 
""" if isinstance(fetch, ops.Operation): operation_fetches.append(fetch) return fetch elif isinstance(fetch, meta_graph_pb2.TensorInfo): tensor_infos.append(fetch) decoded = _get_element_from_tensor_info(fetch, self._func_graph) if (tensor_util.is_tf_type(decoded) or isinstance(decoded, composite_tensor.CompositeTensor)): tensor_fetches.append(decoded) else: operation_fetches.append(decoded) return decoded elif isinstance( fetch, (tensor_lib.Tensor, composite_tensor.CompositeTensor)): tensor_fetches.append(fetch) return fetch else: graph_element = self.graph.as_graph_element(fetch) return _fetch_preprocessing_callback(graph_element) fetches = nest.map_structure(_fetch_preprocessing_callback, fetches) # Expand composite tensors into their component dense Tensors. tensor_fetches = nest.flatten(tensor_fetches, expand_composites=True) for f in flat_feeds + tensor_fetches + operation_fetches: if f.graph is not self._func_graph: raise ValueError("Can only prune function whose feeds and fetches " f"from graph {self._func_graph}. Input " f"{f} is from a different graph {f.graph}.") with self._func_graph.as_default(): pruned_graph = func_graph.FuncGraph(name) lift_map = lift_to_graph.lift_to_graph( operation_fetches + tensor_fetches, pruned_graph, sources=flat_feeds + self.graph.internal_captures, base_graph=self._func_graph) # Note that we add the component tensors of any composite tensors to the # returned function's outputs list; the list must contain these component # tensors, or the function's sparse outputs won't work properly. 
pruned_graph.outputs.extend(lift_map[x] for x in tensor_fetches) pruned_graph.control_outputs.extend( [lift_map[operation] for operation in operation_fetches]) pruned_graph.inputs.extend(lift_map[x] for x in flat_feeds) for external_capture, internal_capture in self.graph.captures: pruned_graph.add_capture(external_capture, lift_map[internal_capture]) for ti in tensor_infos: if ti.WhichOneof("encoding") == "name": # Dense tensors only t = pruned_graph.as_graph_element(ti.name) if tensor_util.is_tf_type(t): t.set_shape(tensor_shape.TensorShape(ti.tensor_shape)) # pylint: disable=protected-access for f in self.graph._functions.values(): pruned_graph._add_function(f) # pylint: enable=protected-access pruned_graph.variables = self.graph.variables def _structured_output_mapping(fetched): """callback for `nest.map_structure()`""" lifted = lift_map[fetched] if isinstance(lifted, ops.Operation): return None return lifted # expand_composites=True here causes composite tensors to be expanded # into their component dense Tensors, mapped to the new graph, and then # reconstituted into their original composite form. pruned_graph.structured_outputs = nest.map_structure( _structured_output_mapping, fetches, expand_composites=True) if input_signature: # canonicalize the signature before setting args, kwargs = input_signature args = () if args is None else args input_signature = (args, kwargs) pruned_graph.structured_input_signature = input_signature pruned_fn = WrappedFunction( pruned_graph, variable_holder=self._variable_holder, are_keyword_args_also_positional=are_keyword_args_also_positional, ) pruned_fn._num_positional_args = len(flat_feeds) # pylint: disable=protected-access # TODO(kathywu): Enable keyword arguments if an input signature is specified pruned_fn._arg_keywords = [tensor.op.name for tensor in flat_feeds] # pylint: disable=protected-access return pruned_fn def _filter_returned_ops(fn): """Filtering out any ops returned by function. 
Args: fn: a function Returns: A tuple of ( Wrapped function that returns `None` in place of any ops, dict that maps the index in the flat output structure to the returned op ) """ returned_ops = {} def wrap_and_filter_returned_ops(*args, **kwargs): outputs = fn(*args, **kwargs) flat_outputs = nest.flatten(outputs) for n in range(len(flat_outputs)): output = flat_outputs[n] if isinstance(output, ops.Operation): returned_ops[n] = output flat_outputs[n] = None return nest.pack_sequence_as(outputs, flat_outputs) return wrap_and_filter_returned_ops, returned_ops
WrappedFunction
python
huggingface__transformers
src/transformers/models/hiera/modeling_hiera.py
{ "start": 11711, "end": 15021 }
class ____(nn.Module): """ Construct position and patch embeddings. """ def __init__(self, config: HieraConfig, is_mae: bool = False) -> None: super().__init__() self.patch_stride = config.patch_stride tokens_spatial_shape = [i // s for i, s in zip(config.image_size, config.patch_stride)] self.mask_spatial_shape = [i // s for i, s in zip(tokens_spatial_shape, config.masked_unit_size)] self.num_tokens = math.prod(tokens_spatial_shape) self.is_mae = is_mae self.patch_embeddings = HieraPatchEmbeddings(config, is_mae=is_mae) self.position_embeddings = nn.Parameter(torch.zeros(1, self.num_tokens, config.embed_dim)) def interpolate_pos_encoding( self, embeddings: torch.Tensor, pos_embeds: torch.Tensor, height: int, width: int ) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing, no class embeddings, and different patch strides. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] num_positions = pos_embeds.shape[1] # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return pos_embeds dim = embeddings.shape[-1] new_height = height // self.patch_stride[0] new_width = width // self.patch_stride[1] sqrt_num_positions = torch_int(num_positions**0.5) pos_embeds = pos_embeds.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) pos_embeds = pos_embeds.permute(0, 3, 1, 2) pos_embeds = nn.functional.interpolate( pos_embeds, size=(new_height, new_width), mode="bicubic", align_corners=False, ) pos_embeds = pos_embeds.permute(0, 2, 3, 1).view(1, -1, 
dim) return pos_embeds def get_position_embedding( self, embeddings: torch.Tensor, height: int, width: int, interpolate_pos_encoding: bool ) -> torch.FloatTensor: return ( self.interpolate_pos_encoding(embeddings, self.position_embeddings, height, width) if interpolate_pos_encoding else self.position_embeddings ) def forward( self, pixel_values: torch.FloatTensor, noise: Optional[torch.FloatTensor] = None, interpolate_pos_encoding: bool = False, ) -> tuple[torch.Tensor, Optional[torch.BoolTensor], Optional[torch.LongTensor]]: height, width = pixel_values.shape[-2:] embeddings, bool_masked_pos, ids_restore = self.patch_embeddings(pixel_values, noise=noise) embeddings = embeddings + self.get_position_embedding(embeddings, height, width, interpolate_pos_encoding) return embeddings, bool_masked_pos, ids_restore
HieraEmbeddings
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance1.py
{ "start": 3933, "end": 3971 }
class ____(Base1): value: str
Sub1_1
python
getsentry__sentry
src/sentry/apidocs/examples/notification_examples.py
{ "start": 645, "end": 1828 }
class ____: CREATE_NOTIFICATION_ACTION = [ OpenApiExample( "Create a new email spike protection notification action for a project", value=NOTIFICATION_ACTION_ONE, status_codes=["201"], response_only=True, ) ] GET_NOTIFICATION_ACTION = [ OpenApiExample( "Retrieve a spike protection notification action created for a project", value=NOTIFICATION_ACTION_ONE, status_codes=["200"], response_only=True, ) ] LIST_NOTIFICATION_ACTIONS = [ OpenApiExample( "List all spike protection notification actions for an organization", value=[NOTIFICATION_ACTION_ONE, NOTIFICATION_ACTION_TWO], status_codes=["201"], response_only=True, ) ] UPDATE_NOTIFICATION_ACTION = [ OpenApiExample( "Update a spike protection notification action created for a project to use sentry_notification as notification service.", value=NOTIFICATION_ACTION_ONE, status_codes=["202"], response_only=True, ) ]
NotificationActionExamples
python
getsentry__sentry
tests/snuba/api/endpoints/test_discover_key_transactions.py
{ "start": 951, "end": 1103 }
class ____(Protocol): def __call__( self, url: str, data: dict[str, Any], format: str, **kwargs: Any ) -> HttpResponse: ...
ClientCallable
python
pallets__jinja
src/jinja2/lexer.py
{ "start": 13030, "end": 13395 }
class ____(tuple): # type: ignore[type-arg] """A special tuple for marking a point in the state that can have lstrip applied. """ __slots__ = () # Even though it looks like a no-op, creating instances fails # without this. def __new__(cls, *members, **kwargs): # type: ignore return super().__new__(cls, members)
OptionalLStrip
python
getsentry__sentry
tests/sentry/tsdb/test_base.py
{ "start": 228, "end": 5357 }
class ____(TestCase): def setUp(self) -> None: self.tsdb = BaseTSDB( rollups=( # time in seconds, samples to keep (10, 30), # 5 minutes at 10 seconds (ONE_MINUTE, 120), # 2 hours at 1 minute (ONE_HOUR, 24), # 1 days at 1 hour (ONE_DAY, 30), # 30 days at 1 day ) ) def test_normalize_to_epoch(self) -> None: timestamp = datetime(2013, 5, 18, 15, 13, 58, 132928, tzinfo=timezone.utc) normalize_to_epoch = self.tsdb.normalize_to_epoch result = normalize_to_epoch(timestamp, 60) assert result == 1368889980 result = normalize_to_epoch(timestamp + timedelta(seconds=20), 60) assert result == 1368890040 result = normalize_to_epoch(timestamp + timedelta(seconds=30), 60) assert result == 1368890040 result = normalize_to_epoch(timestamp + timedelta(seconds=70), 60) assert result == 1368890100 def test_rollup(self) -> None: pre_results = {1: [(1368889980, 5), (1368890040, 10), (1368893640, 7)]} post_results = self.tsdb.rollup(pre_results, 3600) assert len(post_results) == 1 assert post_results[1] == [[1368889200, 15], [1368892800, 7]] def test_calculate_expiry(self) -> None: timestamp = datetime(2013, 5, 18, 15, 13, 58, 132928, tzinfo=timezone.utc) result = self.tsdb.calculate_expiry(10, 30, timestamp) assert result == 1368890330 @freeze_time("2016-08-01") def test_get_optimal_rollup_series_aligned_intervals(self) -> None: start = datetime.now(timezone.utc) - timedelta(seconds=30) assert self.tsdb.get_optimal_rollup_series(start) == ( 10, [(start + timedelta(seconds=10) * i).timestamp() for i in range(4)], ) start = datetime.now(timezone.utc) - timedelta(minutes=30) assert self.tsdb.get_optimal_rollup_series(start) == ( ONE_MINUTE, [(start + timedelta(minutes=1) * i).timestamp() for i in range(31)], ) start = datetime.now(timezone.utc) - timedelta(hours=5) assert self.tsdb.get_optimal_rollup_series(start) == ( ONE_HOUR, [(start + timedelta(hours=1) * i).timestamp() for i in range(6)], ) start = datetime.now(timezone.utc) - timedelta(days=7) assert 
self.tsdb.get_optimal_rollup_series(start) == ( ONE_DAY, [(start + timedelta(hours=24) * i).timestamp() for i in range(8)], ) @freeze_time("2016-08-01 00:00:15") def test_get_optimal_rollup_series_offset_intervals(self) -> None: # This test is a funny one (notice it doesn't return a range that # includes the start position.) This occurs because the algorithm for # determining the series to be returned will attempt to return the same # duration of time as represented by the start and end timestamps, but # doesn't necessarily return data *from that specific interval* (the # end timestamp is always included.) start = datetime.now(timezone.utc) - timedelta(seconds=19) assert self.tsdb.get_optimal_rollup_series(start, rollup=10) == ( 10, [ datetime(2016, 8, 1, 0, 0, 0, tzinfo=timezone.utc).timestamp(), datetime(2016, 8, 1, 0, 0, 10, tzinfo=timezone.utc).timestamp(), ], ) now = datetime.now(timezone.utc) + timedelta(seconds=15) start = now - timedelta(seconds=ONE_MINUTE - 1) assert self.tsdb.get_optimal_rollup_series(start, rollup=ONE_MINUTE) == ( ONE_MINUTE, [datetime(2016, 8, 1, 0, 0, 0, tzinfo=timezone.utc).timestamp()], ) now = datetime.now(timezone.utc) + timedelta(hours=11, seconds=45) start = now - timedelta(seconds=ONE_DAY - 1) assert self.tsdb.get_optimal_rollup_series(start, rollup=ONE_DAY) == ( ONE_DAY, [datetime(2016, 8, 1, 0, tzinfo=timezone.utc).timestamp()], ) @freeze_time("2016-08-01") def test_make_series_aligned_intervals(self) -> None: start = datetime.now(timezone.utc) - timedelta(seconds=30) assert self.tsdb.make_series(0, start) == [ ((start + timedelta(seconds=10) * i).timestamp(), 0) for i in range(4) ] start = datetime.now(timezone.utc) - timedelta(minutes=30) assert self.tsdb.make_series(lambda timestamp: 1, start) == [ ((start + timedelta(minutes=1) * i).timestamp(), 1) for i in range(31) ] counter = itertools.count() start = datetime.now(timezone.utc) - timedelta(hours=5) assert self.tsdb.make_series(lambda timestamp: next(counter), start) == 
[ ((start + timedelta(hours=1) * i).timestamp(), i) for i in range(6) ] start = datetime.now(timezone.utc) - timedelta(days=7) assert self.tsdb.make_series(0, start) == [ ((start + timedelta(hours=24) * i).timestamp(), 0) for i in range(8) ]
BaseTSDBTest
python
huggingface__transformers
src/transformers/models/imagegpt/modeling_imagegpt.py
{ "start": 26929, "end": 32832 }
class ____(ImageGPTPreTrainedModel, GenerationMixin): _tied_weights_keys = {"lm_head.weight": "transformer.wte.weight"} def __init__(self, config: ImageGPTConfig): super().__init__(config) self.transformer = ImageGPTModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size - 1, bias=False) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Any, ) -> Union[tuple, CausalLMOutputWithCrossAttentions]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoImageProcessor`]. See [`ImageGPTImageProcessor.__call__`] for details. labels (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Examples: ```python >>> from transformers import AutoImageProcessor, ImageGPTForCausalImageModeling >>> import torch >>> import matplotlib.pyplot as plt >>> import numpy as np >>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small") >>> model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small") >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") >>> model.to(device) # doctest: +IGNORE_RESULT >>> # unconditional generation of 8 images >>> batch_size = 4 >>> context = torch.full((batch_size, 1), model.config.vocab_size - 1) # initialize with SOS token >>> context = context.to(device) >>> output = model.generate( ... input_ids=context, max_length=model.config.n_positions + 1, temperature=1.0, do_sample=True, top_k=40 ... ) >>> clusters = image_processor.clusters >>> height = image_processor.size["height"] >>> width = image_processor.size["width"] >>> samples = output[:, 1:].detach().cpu().numpy() >>> samples_img = [ ... np.reshape(np.rint(127.5 * (clusters[s] + 1.0)), [height, width, 3]).astype(np.uint8) for s in samples ... ] # convert color cluster tokens back to pixels >>> f, axes = plt.subplots(1, batch_size, dpi=300) >>> for img, ax in zip(samples_img, axes): # doctest: +IGNORE_RESULT ... ax.axis("off") ... 
ax.imshow(img) ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions, ) @auto_docstring( custom_intro=""" The ImageGPT Model transformer with an image classification head on top (linear layer). [`ImageGPTForImageClassification`] average-pools the hidden states in order to do the classification. """ )
ImageGPTForCausalImageModeling
python
kamyu104__LeetCode-Solutions
Python/minimum-moves-to-move-a-box-to-their-target-location.py
{ "start": 80, "end": 2673 }
class ____(object): def minPushBox(self, grid): """ :type grid: List[List[str]] :rtype: int """ directions = [(0, 1), (1, 0), (0, -1), (-1, 0)] def dot(a, b): return a[0]*b[0]+a[1]*b[1] def can_reach(grid, b, p, t): closer, detour = [p], [] lookup = set([b]) while closer or detour: if not closer: closer, detour = detour, closer p = closer.pop() if p == t: return True if p in lookup: continue lookup.add(p) for dx, dy in directions: np = (p[0]+dx, p[1]+dy) if not (0 <= np[0] < len(grid) and 0 <= np[1] < len(grid[0]) and grid[np[0]][np[1]] != '#' and np not in lookup): continue (closer if dot((dx, dy), (t[0]-p[0], t[1]-p[1])) > 0 else detour).append(np) return False def g(a, b): return abs(a[0]-b[0])+abs(a[1]-b[1]) def a_star(grid, b, p, t): f, dh = g(b, t), 2 closer, detour = [(b, p)], [] lookup = set() while closer or detour: if not closer: f += dh closer, detour = detour, closer b, p = closer.pop() if b == t: return f if (b, p) in lookup: continue lookup.add((b, p)) for dx, dy in directions: nb, np = (b[0]+dx, b[1]+dy), (b[0]-dx, b[1]-dy) if not (0 <= nb[0] < len(grid) and 0 <= nb[1] < len(grid[0]) and 0 <= np[0] < len(grid) and 0 <= np[1] < len(grid[0]) and grid[nb[0]][nb[1]] != '#' and grid[np[0]][np[1]] != '#' and (nb, b) not in lookup and can_reach(grid, b, p, np)): continue (closer if dot((dx, dy), (t[0]-b[0], t[1]-b[1])) > 0 else detour).append((nb, b)) return -1 b, p, t = None, None, None for i in xrange(len(grid)): for j in xrange(len(grid[0])): if grid[i][j] == 'B': b = (i, j) elif grid[i][j] == 'S': p = (i, j) elif grid[i][j] == 'T': t = (i, j) return a_star(grid, b, p, t)
Solution
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/fail_test_audit_docstring/package.py
{ "start": 225, "end": 962 }
class ____(MakefilePackage): """Simple package with a stand-alone test that is missing its docstring.""" homepage = "http://github.com/dummy/fail-test-audit-docstring" url = "https://github.com/dummy/fail-test-audit-docstring/archive/v1.0.tar.gz" version("2.0", sha256="c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1") version("1.0", sha256="abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234") # The required docstring is missing. def test_missing_docstring(self): print("Ran test_missing_docstring") # The required docstring is effectively empty. def test_empty_docstring(self): """ """ print("Ran test_empty_docstring")
FailTestAuditDocstring
python
plotly__plotly.py
_plotly_utils/exceptions.py
{ "start": 41, "end": 93 }
class ____(PlotlyError): pass
PlotlyEmptyDataError
python
viewflow__viewflow
viewflow/workflow/admin.py
{ "start": 75, "end": 595 }
class ____(admin.TabularInline): """Task inline.""" model = Task fields = ["flow_task", "flow_task_type", "status", "token", "owner"] readonly_fields = ["flow_task", "flow_task_type", "status", "token", "owner"] def has_add_permission(self, request, obj=None): """Disable manually task creation.""" return False def has_delete_permission(self, request, obj=None): """Disable task deletion in the process inline.""" return False @admin.register(Process)
TaskInline
python
getsentry__sentry
tests/sentry/cache/test_django.py
{ "start": 89, "end": 1289 }
class ____(TestCase): def setUp(self) -> None: self.cache = DjangoCache() self.cache_key = "test-key" self.cache_val = "test-val" def test_get_set(self) -> None: assert self.cache.get(self.cache_key) is None self.cache.set(self.cache_key, self.cache_val, 50) assert self.cache.get(self.cache_key) == self.cache_val # Test re-writing to an existing cache key works self.cache.set(self.cache_key, "new-test-val", 50) assert self.cache.get(self.cache_key) == "new-test-val" def test_delete(self) -> None: self.cache.set(self.cache_key, self.cache_val, 50) assert self.cache.get(self.cache_key) == self.cache_val self.cache.delete(self.cache_key) assert self.cache.get(self.cache_key) is None # Test deletion without an entry works self.cache.delete(self.cache_key) assert self.cache.get(self.cache_key) is None def test_ttl(self) -> None: self.cache.set(self.cache_key, self.cache_val, 0.1) assert self.cache.get(self.cache_key) == self.cache_val time.sleep(0.1) assert self.cache.get(self.cache_key) is None
DjangoCacheTest
python
scipy__scipy
scipy/interpolate/_fitpack2.py
{ "start": 22907, "end": 27258 }
class ____(UnivariateSpline): """ 1-D interpolating spline for a given set of data points. .. legacy:: class Specifically, we recommend using `make_interp_spline` instead. Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. Spline function passes through all provided points. Equivalent to `UnivariateSpline` with `s` = 0. Parameters ---------- x : (N,) array_like Input dimension of data points -- must be strictly increasing y : (N,) array_like input dimension of data points w : (N,) array_like, optional Weights for spline fitting. Must be positive. If None (default), weights are all 1. bbox : (2,) array_like, optional 2-sequence specifying the boundary of the approximation interval. If None (default), ``bbox=[x[0], x[-1]]``. k : int, optional Degree of the smoothing spline. Must be ``1 <= k <= 5``. Default is ``k = 3``, a cubic spline. ext : int or str, optional Controls the extrapolation mode for elements not in the interval defined by the knot sequence. * if ext=0 or 'extrapolate', return the extrapolated value. * if ext=1 or 'zeros', return 0 * if ext=2 or 'raise', raise a ValueError * if ext=3 of 'const', return the boundary value. The default value is 0. check_finite : bool, optional Whether to check that the input arrays contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination or non-sensical results) if the inputs do contain infinities or NaNs. Default is False. See Also -------- UnivariateSpline : a smooth univariate spline to fit a given set of data points. 
LSQUnivariateSpline : a spline for which knots are user-selected SmoothBivariateSpline : a smoothing bivariate spline through the given points LSQBivariateSpline : a bivariate spline using weighted least-squares fitting splrep : a function to find the B-spline representation of a 1-D curve splev : a function to evaluate a B-spline or its derivatives sproot : a function to find the roots of a cubic B-spline splint : a function to evaluate the definite integral of a B-spline between two given points spalde : a function to evaluate all derivatives of a B-spline Notes ----- The number of data points must be larger than the spline degree `k`. Examples -------- >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.interpolate import InterpolatedUnivariateSpline >>> rng = np.random.default_rng() >>> x = np.linspace(-3, 3, 50) >>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50) >>> spl = InterpolatedUnivariateSpline(x, y) >>> plt.plot(x, y, 'ro', ms=5) >>> xs = np.linspace(-3, 3, 1000) >>> plt.plot(xs, spl(xs), 'g', lw=3, alpha=0.7) >>> plt.show() Notice that the ``spl(x)`` interpolates `y`: >>> spl.get_residual() 0.0 """ def __init__(self, x, y, w=None, bbox=[None]*2, k=3, ext=0, check_finite=False): x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, None, ext, check_finite) if not np.all(diff(x) > 0.0): raise ValueError('x must be strictly increasing') # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier with FITPACK_LOCK: self._data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0], xe=bbox[1], s=0) self._reset_class() _fpchec_error_string = """The input parameters have been rejected by fpchec. \ This means that at least one of the following conditions is violated: 1) k+1 <= n-k-1 <= m 2) t(1) <= t(2) <= ... <= t(k+1) t(n-k) <= t(n-k+1) <= ... <= t(n) 3) t(k+1) < t(k+2) < ... 
< t(n-k) 4) t(k+1) <= x(i) <= t(n-k) 5) The conditions specified by Schoenberg and Whitney must hold for at least one subset of data points, i.e., there must be a subset of data points y(j) such that t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1 """ @xp_capabilities(out_of_scope=True)
InterpolatedUnivariateSpline
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_checkbox04.py
{ "start": 315, "end": 1758 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("checkbox04.xlsx") def test_create_file_with_insert_checkbox(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() cell_format = workbook.add_format({"checkbox": True, "bg_color": "red"}) worksheet.insert_checkbox("E9", False, cell_format) workbook.close() self.assertExcelEqual() def test_create_file_with_insert_checkbox_and_manual_format(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() cell_format = workbook.add_format({"checkbox": True, "bg_color": "red"}) worksheet.insert_checkbox("E9", False, cell_format) workbook.close() self.assertExcelEqual() def test_create_file_with_boolean_and_format(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() cell_format = workbook.add_format({"checkbox": True, "bg_color": "red"}) worksheet.write("E9", False, cell_format) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
openai__openai-python
src/openai/resources/vector_stores/file_batches.py
{ "start": 33092, "end": 33628 }
class ____: def __init__(self, file_batches: FileBatches) -> None: self._file_batches = file_batches self.create = to_streamed_response_wrapper( file_batches.create, ) self.retrieve = to_streamed_response_wrapper( file_batches.retrieve, ) self.cancel = to_streamed_response_wrapper( file_batches.cancel, ) self.list_files = to_streamed_response_wrapper( file_batches.list_files, )
FileBatchesWithStreamingResponse
python
doocs__leetcode
solution/1900-1999/1974.Minimum Time to Type Word Using Special Typewriter/Solution.py
{ "start": 0, "end": 231 }
class ____: def minTimeToType(self, word: str) -> int: ans, a = len(word), ord("a") for c in map(ord, word): d = abs(c - a) ans += min(d, 26 - d) a = c return ans
Solution
python
spyder-ide__spyder
spyder/plugins/layout/widgets/dialog.py
{ "start": 826, "end": 933 }
class ____: MoveUp = 'move_up' MoveDown = 'move_down' Remove = 'remove'
LayoutSettingsToolButtons
python
getsentry__sentry-python
sentry_sdk/integrations/strawberry.py
{ "start": 2050, "end": 4515 }
class ____(Integration): identifier = "strawberry" origin = f"auto.graphql.{identifier}" def __init__(self, async_execution=None): # type: (Optional[bool]) -> None if async_execution not in (None, False, True): raise ValueError( 'Invalid value for async_execution: "{}" (must be bool)'.format( async_execution ) ) self.async_execution = async_execution @staticmethod def setup_once(): # type: () -> None version = package_version("strawberry-graphql") _check_minimum_version(StrawberryIntegration, version, "strawberry-graphql") _patch_schema_init() _patch_views() def _patch_schema_init(): # type: () -> None old_schema_init = Schema.__init__ @functools.wraps(old_schema_init) def _sentry_patched_schema_init(self, *args, **kwargs): # type: (Schema, Any, Any) -> None integration = sentry_sdk.get_client().get_integration(StrawberryIntegration) if integration is None: return old_schema_init(self, *args, **kwargs) extensions = kwargs.get("extensions") or [] should_use_async_extension = None # type: Optional[bool] if integration.async_execution is not None: should_use_async_extension = integration.async_execution else: # try to figure it out ourselves should_use_async_extension = _guess_if_using_async(extensions) if should_use_async_extension is None: warnings.warn( "Assuming strawberry is running sync. If not, initialize the integration as StrawberryIntegration(async_execution=True).", stacklevel=2, ) should_use_async_extension = False # remove the built in strawberry sentry extension, if present extensions = [ extension for extension in extensions if extension not in (StrawberrySentryAsyncExtension, StrawberrySentrySyncExtension) ] # add our extension extensions.append( SentryAsyncExtension if should_use_async_extension else SentrySyncExtension ) kwargs["extensions"] = extensions return old_schema_init(self, *args, **kwargs) Schema.__init__ = _sentry_patched_schema_init # type: ignore[method-assign]
StrawberryIntegration
python
cython__cython
Cython/Debugger/libpython.py
{ "start": 79575, "end": 81086 }
class ____: """ This class defines the interface that ExecutionControlCommandBase needs to provide language-specific execution control. Classes that implement this interface should implement: lineno(frame) Tells the current line number (only called for a relevant frame). If lineno is a false value it is not checked for a difference. is_relevant_function(frame) tells whether we care about frame 'frame' get_source_line(frame) get the line of source code for the current line (only called for a relevant frame). If the source code cannot be retrieved this function should return None exc_info(frame) -- optional tells whether an exception was raised, if so, it should return a string representation of the exception value, None otherwise. static_break_functions() returns an iterable of function names that are considered relevant and should halt step-into execution. This is needed to provide a performing step-into runtime_break_functions() -- optional list of functions that we should break into depending on the context """ def exc_info(self, frame): "See this class' docstring." def runtime_break_functions(self): """ Implement this if the list of step-into functions depends on the context. """ return ()
LanguageInfo
python
jazzband__django-oauth-toolkit
tests/test_hybrid.py
{ "start": 52095, "end": 57943 }
class ____(BaseTest): def test_pre_auth_default_scopes(self): """ Test response for a valid client_id with response_type: code using default scopes """ self.client.login(username="hy_test_user", password="123456") query_string = urlencode( { "client_id": self.application.client_id, "response_type": "code token", "state": "random_state_string", "redirect_uri": "http://example.org", } ) url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string) response = self.client.get(url) self.assertEqual(response.status_code, 200) # check form is in context and form params are valid self.assertIn("form", response.context) form = response.context["form"] self.assertEqual(form["redirect_uri"].value(), "http://example.org") self.assertEqual(form["state"].value(), "random_state_string") self.assertEqual(form["scope"].value(), "read") self.assertEqual(form["client_id"].value(), self.application.client_id) @pytest.mark.django_db(databases=retrieve_current_databases()) @pytest.mark.oauth2_settings(presets.OIDC_SETTINGS_RW) def test_id_token_nonce_in_token_response(oauth2_settings, test_user, hybrid_application, client, oidc_key): client.force_login(test_user) auth_rsp = client.post( reverse("oauth2_provider:authorize"), data={ "client_id": hybrid_application.client_id, "state": "random_state_string", "scope": "openid", "redirect_uri": "http://example.org", "response_type": "code id_token", "nonce": "random_nonce_string", "allow": True, }, ) assert auth_rsp.status_code == 302 auth_data = parse_qs(urlparse(auth_rsp["Location"]).fragment) assert "code" in auth_data assert "id_token" in auth_data # Decode the id token - is the nonce correct jwt_token = jwt.JWT(key=oidc_key, jwt=auth_data["id_token"][0]) claims = json.loads(jwt_token.claims) assert "nonce" in claims assert claims["nonce"] == "random_nonce_string" code = auth_data["code"][0] client.logout() # Get the token response using the code token_rsp = client.post( reverse("oauth2_provider:token"), data={ 
"grant_type": "authorization_code", "code": code, "redirect_uri": "http://example.org", "client_id": hybrid_application.client_id, "client_secret": CLEARTEXT_SECRET, "scope": "openid", }, ) assert token_rsp.status_code == 200 token_data = token_rsp.json() assert "id_token" in token_data # The nonce should be present in this id token also jwt_token = jwt.JWT(key=oidc_key, jwt=token_data["id_token"]) claims = json.loads(jwt_token.claims) assert "nonce" in claims assert claims["nonce"] == "random_nonce_string" @pytest.mark.django_db(databases=retrieve_current_databases()) @pytest.mark.oauth2_settings(presets.OIDC_SETTINGS_RW) def test_claims_passed_to_code_generation( oauth2_settings, test_user, hybrid_application, client, mocker, oidc_key ): # Add a spy on to OAuth2Validator.finalize_id_token mocker.patch.object( OAuth2Validator, "finalize_id_token", spy_on(OAuth2Validator.finalize_id_token), ) claims = {"id_token": {"email": {"essential": True}}} client.force_login(test_user) auth_form_rsp = client.get( reverse("oauth2_provider:authorize"), data={ "client_id": hybrid_application.client_id, "state": "random_state_string", "scope": "openid", "redirect_uri": "http://example.org", "response_type": "code id_token", "nonce": "random_nonce_string", "claims": json.dumps(claims), }, ) # Check that claims has made it in to the form to be submitted assert auth_form_rsp.status_code == 200 form_initial_data = auth_form_rsp.context_data["form"].initial assert "claims" in form_initial_data assert json.loads(form_initial_data["claims"]) == claims # Filter out not specified values form_data = {key: value for key, value in form_initial_data.items() if value is not None} # Now submitting the form (with allow=True) should persist requested claims auth_rsp = client.post( reverse("oauth2_provider:authorize"), data={"allow": True, **form_data}, ) assert auth_rsp.status_code == 302 auth_data = parse_qs(urlparse(auth_rsp["Location"]).fragment) assert "code" in auth_data assert "id_token" in 
auth_data assert OAuth2Validator.finalize_id_token.spy.call_count == 1 oauthlib_request = OAuth2Validator.finalize_id_token.spy.call_args[0][4] assert oauthlib_request.claims == claims assert Grant.objects.get().claims == json.dumps(claims) OAuth2Validator.finalize_id_token.spy.reset_mock() # Get the token response using the code client.logout() code = auth_data["code"][0] token_rsp = client.post( reverse("oauth2_provider:token"), data={ "grant_type": "authorization_code", "code": code, "redirect_uri": "http://example.org", "client_id": hybrid_application.client_id, "client_secret": CLEARTEXT_SECRET, "scope": "openid", }, ) assert token_rsp.status_code == 200 token_data = token_rsp.json() assert "id_token" in token_data assert OAuth2Validator.finalize_id_token.spy.call_count == 1 oauthlib_request = OAuth2Validator.finalize_id_token.spy.call_args[0][4] assert oauthlib_request.claims == claims
TestDefaultScopesHybrid
python
crytic__slither
slither/tools/upgradeability/checks/initialization.py
{ "start": 5043, "end": 7157 }
class ____(AbstractCheck): ARGUMENT = "missing-init-modifier" IMPACT = CheckClassification.HIGH HELP = "initializer() is not called" WIKI = "https://github.com/crytic/slither/wiki/Upgradeability-Checks#initializer-is-not-called" WIKI_TITLE = "initializer() is not called" # region wiki_description WIKI_DESCRIPTION = """ Detect if `Initializable.initializer()` or `Initializable.reinitializer(uint64)` is called. """ # endregion wiki_description # region wiki_exploit_scenario WIKI_EXPLOIT_SCENARIO = """ ```solidity contract Contract{ function initialize() public{ /// } } ``` `initialize` should have the `initializer` modifier to prevent someone from initializing the contract multiple times. """ # endregion wiki_exploit_scenario # region wiki_recommendation WIKI_RECOMMENDATION = """ Use `Initializable.initializer()` or `Initializable.reinitializer(uint64)`. """ # endregion wiki_recommendation REQUIRE_CONTRACT = True def _check(self): initializable = self.contract.file_scope.get_contract_from_name("Initializable") # See InitializablePresent if initializable is None: return [] # See InitializableInherited if initializable not in self.contract.inheritance: return [] initializer = self.contract.get_modifier_from_canonical_name("Initializable.initializer()") reinitializer = self.contract.get_modifier_from_canonical_name( "Initializable.reinitializer(uint64)" ) # InitializableInitializer if initializer is None and reinitializer is None: return [] results = [] all_init_functions = _get_initialize_functions(self.contract) for f in all_init_functions: if initializer not in f.modifiers and reinitializer not in f.modifiers: info = [f, " does not call the initializer or reinitializer modifier.\n"] json = self.generate_result(info) results.append(json) return results
MissingInitializerModifier
python
run-llama__llama_index
llama-index-core/llama_index/core/selectors/embedding_selectors.py
{ "start": 514, "end": 3058 }
class ____(BaseSelector): """ Embedding selector. Embedding selector that chooses one out of many options. Args: embed_model (BaseEmbedding): An embedding model. """ def __init__( self, embed_model: BaseEmbedding, ) -> None: self._embed_model = embed_model @classmethod def from_defaults( cls, embed_model: Optional[BaseEmbedding] = None, ) -> "EmbeddingSingleSelector": # optionally initialize defaults embed_model = embed_model or Settings.embed_model # construct prompt return cls(embed_model) def _get_prompts(self) -> Dict[str, Any]: """Get prompts.""" return {} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" def _select( self, choices: Sequence[ToolMetadata], query: QueryBundle ) -> SelectorResult: query_embedding = self._embed_model.get_query_embedding(query.query_str) text_embeddings = [ self._embed_model.get_text_embedding(choice.description) for choice in choices ] top_similarities, top_ids = get_top_k_embeddings( query_embedding, text_embeddings, similarity_top_k=1, embedding_ids=list(range(len(choices))), ) # get top choice top_selection_reason = f"Top similarity match: {top_similarities[0]:.2f}, {choices[top_ids[0]].name}" top_selection = SingleSelection(index=top_ids[0], reason=top_selection_reason) # parse output return SelectorResult(selections=[top_selection]) async def _aselect( self, choices: Sequence[ToolMetadata], query: QueryBundle ) -> SelectorResult: query_embedding = await self._embed_model.aget_query_embedding(query.query_str) text_embeddings = [ await self._embed_model.aget_text_embedding(choice.description) for choice in choices ] top_similarities, top_ids = get_top_k_embeddings( query_embedding, text_embeddings, similarity_top_k=1, embedding_ids=list(range(len(choices))), ) # get top choice top_selection_reason = f"Top similarity match: {top_similarities[0]:.2f}, {choices[top_ids[0]].name}" top_selection = SingleSelection(index=top_ids[0], reason=top_selection_reason) # parse output return 
SelectorResult(selections=[top_selection])
EmbeddingSingleSelector
python
getsentry__sentry
src/sentry/release_health/release_monitor/metrics.py
{ "start": 927, "end": 11810 }
class ____(BaseReleaseMonitorBackend): def fetch_projects_with_recent_sessions_with_offset(self) -> Mapping[int, Sequence[int]]: with metrics.timer( "release_monitor.fetch_projects_with_recent_sessions.loop", sample_rate=1.0 ): aggregated_projects = defaultdict(list) start_time = time.time() offset = 0 while (time.time() - start_time) < self.MAX_SECONDS: query = ( Query( match=Entity(EntityKey.OrgMetricsCounters.value), select=[ Column("org_id"), Column("project_id"), ], groupby=[Column("org_id"), Column("project_id")], where=[ Condition( Column("timestamp"), Op.GTE, datetime.utcnow() - timedelta(hours=6) ), Condition(Column("timestamp"), Op.LT, datetime.utcnow()), Condition( Column("metric_id"), Op.EQ, SESSION_METRIC_NAMES[SessionMRI.RAW_SESSION.value], ), ], granularity=Granularity(3600), orderby=[ OrderBy(Column("org_id"), Direction.ASC), OrderBy(Column("project_id"), Direction.ASC), ], ) .set_limit(self.CHUNK_SIZE + 1) .set_offset(offset) ) request = Request( dataset=Dataset.Metrics.value, app_id="release_health", query=query ) data = raw_snql_query( request, referrer="release_monitor.fetch_projects_with_recent_sessions" )["data"] count = len(data) more_results = count > self.CHUNK_SIZE offset += self.CHUNK_SIZE if more_results: data = data[:-1] for row in data: aggregated_projects[row["org_id"]].append(row["project_id"]) if not more_results: break else: logger.error( "release_monitor.fetch_projects_with_recent_sessions.loop_timeout", extra={"offset": offset}, ) return aggregated_projects def fetch_projects_with_recent_sessions_with_filter(self) -> Mapping[int, Sequence[int]]: with metrics.timer( "release_monitor.fetch_projects_with_recent_sessions.loop", sample_rate=1.0 ): aggregated_projects = defaultdict(list) start_time = time.time() prev_org_id = 0 prev_project_id = 0 while (time.time() - start_time) < self.MAX_SECONDS: query = Query( match=Entity(EntityKey.OrgMetricsCounters.value), select=[ Column("org_id"), Column("project_id"), ], 
groupby=[Column("org_id"), Column("project_id")], where=[ Condition( Column("timestamp"), Op.GTE, datetime.utcnow() - timedelta(hours=6) ), Condition(Column("timestamp"), Op.LT, datetime.utcnow()), Condition( Column("metric_id"), Op.EQ, SESSION_METRIC_NAMES[SessionMRI.RAW_SESSION.value], ), BooleanCondition( BooleanOp.OR, [ Condition(Column("org_id"), Op.GT, prev_org_id), BooleanCondition( BooleanOp.AND, [ Condition(Column("org_id"), Op.EQ, prev_org_id), Condition(Column("project_id"), Op.GT, prev_project_id), ], ), ], ), ], granularity=Granularity(3600), orderby=[ OrderBy(Column("org_id"), Direction.ASC), OrderBy(Column("project_id"), Direction.ASC), ], ).set_limit(self.CHUNK_SIZE + 1) request = Request( dataset=Dataset.Metrics.value, app_id="release_health", query=query ) data = raw_snql_query( request, referrer="release_monitor.fetch_projects_with_recent_sessions" )["data"] count = len(data) more_results = count > self.CHUNK_SIZE if more_results: data = data[:-1] for row in data: aggregated_projects[row["org_id"]].append(row["project_id"]) # Update prev_org_id and prev_project_id for the next iteration if data: last_row = data[-1] prev_org_id = last_row["org_id"] prev_project_id = last_row["project_id"] if not more_results: break else: logger.error( "release_monitor.fetch_projects_with_recent_sessions.loop_timeout", extra={"prev_org_id": prev_org_id, "prev_project_id": prev_project_id}, ) return aggregated_projects def fetch_projects_with_recent_sessions(self) -> Mapping[int, Sequence[int]]: if options.get("release-health.use-org-and-project-filter"): return self.fetch_projects_with_recent_sessions_with_filter() return self.fetch_projects_with_recent_sessions_with_offset() def fetch_project_release_health_totals( self, org_id: int, project_ids: Sequence[int] ) -> Totals: start_time = time.time() offset = 0 totals: Totals = defaultdict(dict) with metrics.timer("release_monitor.fetch_project_release_health_totals.loop"): while (time.time() - start_time) < 
self.MAX_SECONDS: release_key = resolve_tag_key(UseCaseID.SESSIONS, org_id, "release") release_col = Column(release_key) env_key = resolve_tag_key(UseCaseID.SESSIONS, org_id, "environment") env_col = Column(env_key) query = ( Query( match=Entity(EntityKey.MetricsCounters.value), select=[ Function("sum", [Column("value")], "sessions"), Column("project_id"), release_col, env_col, ], groupby=[ Column("project_id"), release_col, env_col, ], where=[ Condition( Column("timestamp"), Op.GTE, datetime.utcnow() - timedelta(hours=6), ), Condition(Column("timestamp"), Op.LT, datetime.utcnow()), Condition(Column("org_id"), Op.EQ, org_id), Condition(Column("project_id"), Op.IN, project_ids), Condition( Column("metric_id"), Op.EQ, indexer.resolve( UseCaseID.SESSIONS, org_id, SessionMRI.RAW_SESSION.value ), ), ], granularity=Granularity(21600), orderby=[ OrderBy(Column("project_id"), Direction.ASC), OrderBy(release_col, Direction.ASC), OrderBy(env_col, Direction.ASC), ], ) .set_limit(self.CHUNK_SIZE + 1) .set_offset(offset) ) request = Request( dataset=Dataset.Metrics.value, app_id="release_health", query=query, tenant_ids={"organization_id": org_id}, ) with metrics.timer("release_monitor.fetch_project_release_health_totals.query"): data = raw_snql_query( request, "release_monitor.fetch_project_release_health_totals" )["data"] count = len(data) more_results = count > self.CHUNK_SIZE offset += self.CHUNK_SIZE if more_results: data = data[:-1] # convert indexes back to strings indexes: set[int] = set() for row in data: indexes.add(row[env_key]) indexes.add(row[release_key]) resolved_strings = indexer.bulk_reverse_resolve( UseCaseID.SESSIONS, org_id, indexes ) for row in data: env_name = resolved_strings.get(row[env_key]) release_name = resolved_strings.get(row[release_key]) row_totals = totals[row["project_id"]].setdefault( env_name, {"total_sessions": 0, "releases": defaultdict(int)} # type: ignore[arg-type] ) row_totals["total_sessions"] += row["sessions"] 
row_totals["releases"][release_name] += row["sessions"] # type: ignore[index] if not more_results: break else: logger.error( "fetch_project_release_health_totals.loop_timeout", extra={"org_id": org_id, "project_ids": project_ids}, ) return totals
MetricReleaseMonitorBackend
python
Pylons__pyramid
tests/test_config/test_rendering.py
{ "start": 18, "end": 1365 }
class ____(unittest.TestCase): def _makeOne(self, *arg, **kw): from pyramid.config import Configurator config = Configurator(*arg, **kw) return config def test_add_default_renderers(self): from pyramid.config.rendering import DEFAULT_RENDERERS from pyramid.interfaces import IRendererFactory config = self._makeOne(autocommit=True) config.add_default_renderers() for name, impl in DEFAULT_RENDERERS: self.assertTrue( config.registry.queryUtility(IRendererFactory, name) is not None ) def test_add_renderer(self): from pyramid.interfaces import IRendererFactory config = self._makeOne(autocommit=True) renderer = object() config.add_renderer('name', renderer) self.assertEqual( config.registry.getUtility(IRendererFactory, 'name'), renderer ) def test_add_renderer_dottedname_factory(self): from pyramid.interfaces import IRendererFactory config = self._makeOne(autocommit=True) import tests.test_config config.add_renderer('name', 'tests.test_config') self.assertEqual( config.registry.getUtility(IRendererFactory, 'name'), tests.test_config, )
TestRenderingConfiguratorMixin
python
weaviate__weaviate-python-client
weaviate/connect/integrations.py
{ "start": 787, "end": 1136 }
class ____(_IntegrationConfig): api_key: str = Field(serialization_alias="X-Huggingface-Api-Key") requests_per_minute_embeddings: Optional[int] = Field( serialization_alias="X-Huggingface-Ratelimit-RequestPM-Embedding" ) base_url: Optional[str] = Field(serialization_alias="X-Huggingface-Baseurl")
_IntegrationConfigHuggingface
python
sqlalchemy__sqlalchemy
test/ext/test_compiler.py
{ "start": 15201, "end": 16776 }
class ____(fixtures.TestBase, AssertsCompiledSQL): """Test replacement of default compilation on existing constructs.""" __dialect__ = "default" def teardown_test(self): for cls in (Select, BindParameter): deregister(cls) def test_select(self): t1 = table("t1", column("c1"), column("c2")) @compiles(Select, "sqlite") def compile_(element, compiler, **kw): return "OVERRIDE" s1 = select(t1) self.assert_compile(s1, "SELECT t1.c1, t1.c2 FROM t1") from sqlalchemy.dialects.sqlite import base as sqlite self.assert_compile(s1, "OVERRIDE", dialect=sqlite.dialect()) def test_binds_in_select(self): t = table("t", column("a"), column("b"), column("c")) @compiles(BindParameter) def gen_bind(element, compiler, **kw): return "BIND(%s)" % compiler.visit_bindparam(element, **kw) self.assert_compile( t.select().where(t.c.c == 5), "SELECT t.a, t.b, t.c FROM t WHERE t.c = BIND(:c_1)", use_default_dialect=True, ) def test_binds_in_dml(self): t = table("t", column("a"), column("b"), column("c")) @compiles(BindParameter) def gen_bind(element, compiler, **kw): return "BIND(%s)" % compiler.visit_bindparam(element, **kw) self.assert_compile( t.insert(), "INSERT INTO t (a, b) VALUES (BIND(:a), BIND(:b))", {"a": 1, "b": 2}, use_default_dialect=True, )
DefaultOnExistingTest
python
apache__airflow
task-sdk/src/airflow/sdk/api/datamodels/_generated.py
{ "start": 10562, "end": 10857 }
class ____(BaseModel): """ Request body schema for creating variables. """ model_config = ConfigDict( extra="forbid", ) val: Annotated[str | None, Field(title="Val")] = None description: Annotated[str | None, Field(title="Description")] = None
VariablePostBody
python
plotly__plotly.py
plotly/graph_objs/heatmap/hoverlabel/_font.py
{ "start": 233, "end": 17143 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "heatmap.hoverlabel" _path_str = "heatmap.hoverlabel.font" _valid_props = { "color", "colorsrc", "family", "familysrc", "lineposition", "linepositionsrc", "shadow", "shadowsrc", "size", "sizesrc", "style", "stylesrc", "textcase", "textcasesrc", "variant", "variantsrc", "weight", "weightsrc", } @property def color(self): """ The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def colorsrc(self): """ Sets the source reference on Chart Studio Cloud for `color`. The 'colorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["colorsrc"] @colorsrc.setter def colorsrc(self, val): self["colorsrc"] = val @property def family(self): """ HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. The 'family' property is a string and must be specified as: - A non-empty string - A tuple, list, or one-dimensional numpy array of the above Returns ------- str|numpy.ndarray """ return self["family"] @family.setter def family(self, val): self["family"] = val @property def familysrc(self): """ Sets the source reference on Chart Studio Cloud for `family`. 
The 'familysrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["familysrc"] @familysrc.setter def familysrc(self, val): self["familysrc"] = val @property def lineposition(self): """ Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. The 'lineposition' property is a flaglist and may be specified as a string containing: - Any combination of ['under', 'over', 'through'] joined with '+' characters (e.g. 'under+over') OR exactly one of ['none'] (e.g. 'none') - A list or array of the above Returns ------- Any|numpy.ndarray """ return self["lineposition"] @lineposition.setter def lineposition(self, val): self["lineposition"] = val @property def linepositionsrc(self): """ Sets the source reference on Chart Studio Cloud for `lineposition`. The 'linepositionsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["linepositionsrc"] @linepositionsrc.setter def linepositionsrc(self, val): self["linepositionsrc"] = val @property def shadow(self): """ Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow for additional options. The 'shadow' property is a string and must be specified as: - A string - A number that will be converted to a string - A tuple, list, or one-dimensional numpy array of the above Returns ------- str|numpy.ndarray """ return self["shadow"] @shadow.setter def shadow(self, val): self["shadow"] = val @property def shadowsrc(self): """ Sets the source reference on Chart Studio Cloud for `shadow`. 
The 'shadowsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["shadowsrc"] @shadowsrc.setter def shadowsrc(self, val): self["shadowsrc"] = val @property def size(self): """ The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|float|numpy.ndarray """ return self["size"] @size.setter def size(self, val): self["size"] = val @property def sizesrc(self): """ Sets the source reference on Chart Studio Cloud for `size`. The 'sizesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["sizesrc"] @sizesrc.setter def sizesrc(self, val): self["sizesrc"] = val @property def style(self): """ Sets whether a font should be styled with a normal or italic face from its family. The 'style' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'italic'] - A tuple, list, or one-dimensional numpy array of the above Returns ------- Any|numpy.ndarray """ return self["style"] @style.setter def style(self, val): self["style"] = val @property def stylesrc(self): """ Sets the source reference on Chart Studio Cloud for `style`. The 'stylesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["stylesrc"] @stylesrc.setter def stylesrc(self, val): self["stylesrc"] = val @property def textcase(self): """ Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. 
The 'textcase' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'word caps', 'upper', 'lower'] - A tuple, list, or one-dimensional numpy array of the above Returns ------- Any|numpy.ndarray """ return self["textcase"] @textcase.setter def textcase(self, val): self["textcase"] = val @property def textcasesrc(self): """ Sets the source reference on Chart Studio Cloud for `textcase`. The 'textcasesrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["textcasesrc"] @textcasesrc.setter def textcasesrc(self, val): self["textcasesrc"] = val @property def variant(self): """ Sets the variant of the font. The 'variant' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'small-caps', 'all-small-caps', 'all-petite-caps', 'petite-caps', 'unicase'] - A tuple, list, or one-dimensional numpy array of the above Returns ------- Any|numpy.ndarray """ return self["variant"] @variant.setter def variant(self, val): self["variant"] = val @property def variantsrc(self): """ Sets the source reference on Chart Studio Cloud for `variant`. The 'variantsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["variantsrc"] @variantsrc.setter def variantsrc(self, val): self["variantsrc"] = val @property def weight(self): """ Sets the weight (or boldness) of the font. The 'weight' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [1, 1000] OR exactly one of ['normal', 'bold'] (e.g. 'bold') - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|numpy.ndarray """ return self["weight"] @weight.setter def weight(self, val): self["weight"] = val @property def weightsrc(self): """ Sets the source reference on Chart Studio Cloud for `weight`. 
The 'weightsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["weightsrc"] @weightsrc.setter def weightsrc(self, val): self["weightsrc"] = val @property def _prop_descriptions(self): return """\ color colorsrc Sets the source reference on Chart Studio Cloud for `color`. family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. familysrc Sets the source reference on Chart Studio Cloud for `family`. lineposition Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. linepositionsrc Sets the source reference on Chart Studio Cloud for `lineposition`. shadow Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en- US/docs/Web/CSS/text-shadow for additional options. shadowsrc Sets the source reference on Chart Studio Cloud for `shadow`. size sizesrc Sets the source reference on Chart Studio Cloud for `size`. style Sets whether a font should be styled with a normal or italic face from its family. stylesrc Sets the source reference on Chart Studio Cloud for `style`. textcase Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. textcasesrc Sets the source reference on Chart Studio Cloud for `textcase`. variant Sets the variant of the font. variantsrc Sets the source reference on Chart Studio Cloud for `variant`. weight Sets the weight (or boldness) of the font. weightsrc Sets the source reference on Chart Studio Cloud for `weight`. 
""" def __init__( self, arg=None, color=None, colorsrc=None, family=None, familysrc=None, lineposition=None, linepositionsrc=None, shadow=None, shadowsrc=None, size=None, sizesrc=None, style=None, stylesrc=None, textcase=None, textcasesrc=None, variant=None, variantsrc=None, weight=None, weightsrc=None, **kwargs, ): """ Construct a new Font object Sets the font used in hover labels. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.heatmap.hoverlabel.Font` color colorsrc Sets the source reference on Chart Studio Cloud for `color`. family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. familysrc Sets the source reference on Chart Studio Cloud for `family`. lineposition Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. linepositionsrc Sets the source reference on Chart Studio Cloud for `lineposition`. shadow Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en- US/docs/Web/CSS/text-shadow for additional options. shadowsrc Sets the source reference on Chart Studio Cloud for `shadow`. size sizesrc Sets the source reference on Chart Studio Cloud for `size`. style Sets whether a font should be styled with a normal or italic face from its family. stylesrc Sets the source reference on Chart Studio Cloud for `style`. textcase Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. textcasesrc Sets the source reference on Chart Studio Cloud for `textcase`. variant Sets the variant of the font. 
variantsrc Sets the source reference on Chart Studio Cloud for `variant`. weight Sets the weight (or boldness) of the font. weightsrc Sets the source reference on Chart Studio Cloud for `weight`. Returns ------- Font """ super().__init__("font") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.heatmap.hoverlabel.Font constructor must be a dict or an instance of :class:`plotly.graph_objs.heatmap.hoverlabel.Font`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("color", arg, color) self._set_property("colorsrc", arg, colorsrc) self._set_property("family", arg, family) self._set_property("familysrc", arg, familysrc) self._set_property("lineposition", arg, lineposition) self._set_property("linepositionsrc", arg, linepositionsrc) self._set_property("shadow", arg, shadow) self._set_property("shadowsrc", arg, shadowsrc) self._set_property("size", arg, size) self._set_property("sizesrc", arg, sizesrc) self._set_property("style", arg, style) self._set_property("stylesrc", arg, stylesrc) self._set_property("textcase", arg, textcase) self._set_property("textcasesrc", arg, textcasesrc) self._set_property("variant", arg, variant) self._set_property("variantsrc", arg, variantsrc) self._set_property("weight", arg, weight) self._set_property("weightsrc", arg, weightsrc) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Font
python
Textualize__textual
docs/examples/guide/reactivity/dynamic_watch.py
{ "start": 544, "end": 972 }
class ____(App[None]):
    """App that mirrors a Counter's reactive value onto a ProgressBar via App.watch."""

    def compose(self) -> ComposeResult:
        yield Counter()
        yield ProgressBar(total=100, show_eta=False)

    def on_mount(self):
        def mirror_counter(counter_value: int):  # (2)!
            self.query_one(ProgressBar).update(progress=counter_value)

        self.watch(self.query_one(Counter), "counter", mirror_counter)  # (3)!


if __name__ == "__main__":
    WatchApp().run()
WatchApp
python
mlflow__mlflow
mlflow/tracing/utils/warning.py
{ "start": 106, "end": 2655 }
class ____(logging.Filter): def __init__(self, module: str, message: str): super().__init__() self.module = module self.message = message def filter(self, record: logging.LogRecord) -> bool: if record.name == self.module and self.message in record.getMessage(): record.levelno = logging.DEBUG # Change the log level to DEBUG record.levelname = "DEBUG" # Check the log level for the logger is debug or not logger = logging.getLogger(self.module) return logger.isEnabledFor(logging.DEBUG) return True def __eq__(self, other): if isinstance(other, LogDemotionFilter): return self.module == other.module and self.message == other.message return False def suppress_warning(module: str, message: str): """ Convert the "Failed to detach context" log raised by the OpenTelemetry logger to DEBUG level so that it does not show up in the user's console. Args: module: The module name of the logger that raises the warning. message: The (part of) message in the log that needs to be demoted to DEBUG level """ try: logger = getattr(importlib.import_module(module), "logger", None) log_filter = LogDemotionFilter(module, message) if logger and not any(f == log_filter for f in logger.filters): logger.addFilter(log_filter) except Exception as e: _logger.debug(f"Failed to suppress the warning for {module}", exc_info=e) raise def request_id_backward_compatible(func): """ A decorator to support backward compatibility for the `request_id` parameter, which is deprecated and replaced by the `trace_id` parameter in tracing APIs. This decorator will adds `request_id` to the function signature and issue a deprecation warning if `request_id` is used with non-null value. """ @functools.wraps(func) def wrapper(*args, request_id: str | None = None, **kwargs): if request_id is not None: warnings.warn( f"The request_id parameter is deprecated from the {func.__name__} API " "and will be removed in a future version. 
Please use the `trace_id` " "parameter instead.", category=FutureWarning, stacklevel=2, ) if kwargs.get("trace_id") is None: kwargs["trace_id"] = request_id return func(*args, **kwargs) return wrapper
LogDemotionFilter
python
python-excel__xlrd
tests/test_xldate.py
{ "start": 234, "end": 2316 }
class ____(unittest.TestCase):
    """Round-trip checks for the xldate serial-number conversion helpers."""

    def test_date_as_tuple(self):
        # Excel date serial -> (year, month, day, hour, minute, second).
        cases = [
            (2741., (1907, 7, 3, 0, 0, 0)),
            (38406., (2005, 2, 23, 0, 0, 0)),
            (32266., (1988, 5, 3, 0, 0, 0)),
        ]
        for serial, expected in cases:
            self.assertEqual(xldate.xldate_as_tuple(serial, DATEMODE), expected)

    def test_time_as_tuple(self):
        # Fractional-day serial -> time-only tuple (date fields zero).
        cases = [
            (.273611, (0, 0, 0, 6, 34, 0)),
            (.538889, (0, 0, 0, 12, 56, 0)),
            (.741123, (0, 0, 0, 17, 47, 13)),
        ]
        for serial, expected in cases:
            self.assertEqual(xldate.xldate_as_tuple(serial, DATEMODE), expected)

    def test_xldate_from_date_tuple(self):
        # (year, month, day) -> whole-day Excel serial.
        cases = [
            ((1907, 7, 3), 2741.),
            ((2005, 2, 23), 38406.),
            ((1988, 5, 3), 32266.),
        ]
        for ymd, expected in cases:
            self.assertAlmostEqual(
                xldate.xldate_from_date_tuple(ymd, DATEMODE), expected
            )

    def test_xldate_from_time_tuple(self):
        # (hour, minute, second) -> fractional-day serial.
        cases = [
            ((6, 34, 0), .273611),
            ((12, 56, 0), .538889),
            ((17, 47, 13), .741123),
        ]
        for hms, expected in cases:
            self.assertAlmostEqual(
                xldate.xldate_from_time_tuple(hms), expected, places=6
            )

    def test_xldate_from_datetime_tuple(self):
        # Full datetime tuple -> combined date+time serial.
        cases = [
            ((1907, 7, 3, 6, 34, 0), 2741.273611),
            ((2005, 2, 23, 12, 56, 0), 38406.538889),
            ((1988, 5, 3, 17, 47, 13), 32266.741123),
        ]
        for dt, expected in cases:
            self.assertAlmostEqual(
                xldate.xldate_from_datetime_tuple(dt, DATEMODE), expected, places=6
            )


if __name__ == '__main__':
    unittest.main()
TestXLDate
python
bokeh__bokeh
tests/unit/bokeh/util/test_callback_manager.py
{ "start": 2238, "end": 2667 }
class ____:
    """Callable fixture whose ``__call__``/``method`` accept no extra args.

    NOTE(review): the name suggests these signatures are deliberately
    *invalid* for event callbacks (which receive an ``event`` argument) —
    presumably used to exercise signature validation in the surrounding
    tests; confirm against the test bodies that use it.
    """

    def __call__(self):
        pass

    def method(self):
        pass


# Fixture with the one ``event`` argument an event callback must accept.
def _good_event(event):
    pass


# Fixture with the wrong arity for an event callback (three required args).
def _bad_event(x,y,z):
    pass


# Fixture that becomes a valid event callback once the leading ``arg`` is
# bound (e.g. via functools.partial) — TODO confirm against the tests.
def _partially_good_event(arg, event):
    pass


# Named "bad" despite taking ``event``; presumably it is invalid only after
# partial application in the tests — verify against the callers.
def _partially_bad_event(event):
    pass

#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
_BadEventCallback
python
pypa__pip
src/pip/_vendor/urllib3/connection.py
{ "start": 10127, "end": 20107 }
class ____(HTTPConnection):
    """
    Many of the parameters to this constructor are passed to the underlying
    SSL socket by means of :py:func:`urllib3.util.ssl_wrap_socket`.
    """

    # Class-level defaults; instances overwrite these via __init__/set_cert.
    default_port = port_by_scheme["https"]
    cert_reqs = None
    ca_certs = None
    ca_cert_dir = None
    ca_cert_data = None
    ssl_version = None
    assert_fingerprint = None
    tls_in_tls_required = False

    def __init__(
        self,
        host,
        port=None,
        key_file=None,
        cert_file=None,
        key_password=None,
        strict=None,
        timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
        ssl_context=None,
        server_hostname=None,
        **kw
    ):

        HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw)

        # Per-connection TLS configuration; CA material and hostname/fingerprint
        # assertions are filled in later by set_cert().
        self.key_file = key_file
        self.cert_file = cert_file
        self.key_password = key_password
        self.ssl_context = ssl_context
        self.server_hostname = server_hostname

        # Required property for Google AppEngine 1.9.0 which otherwise causes
        # HTTPS requests to go out as HTTP. (See Issue #356)
        self._protocol = "https"

    def set_cert(
        self,
        key_file=None,
        cert_file=None,
        cert_reqs=None,
        key_password=None,
        ca_certs=None,
        assert_hostname=None,
        assert_fingerprint=None,
        ca_cert_dir=None,
        ca_cert_data=None,
    ):
        """
        This method should only be called once, before the connection is used.

        Stores client cert/key, CA material and hostname/fingerprint
        assertions that connect() later passes to ssl_wrap_socket.
        """
        # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also
        # have an SSLContext object in which case we'll use its verify_mode.
        if cert_reqs is None:
            if self.ssl_context is not None:
                cert_reqs = self.ssl_context.verify_mode
            else:
                cert_reqs = resolve_cert_reqs(None)

        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.key_password = key_password
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
        # expanduser so "~/certs/ca.pem"-style paths work; None/"" pass through.
        self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
        self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
        self.ca_cert_data = ca_cert_data

    def connect(self):
        # Add certificate verification
        # Step 1: raw TCP connection (possibly to a proxy).
        self.sock = conn = self._new_conn()
        hostname = self.host
        tls_in_tls = False

        if self._is_using_tunnel():
            if self.tls_in_tls_required:
                # HTTPS proxy: wrap the proxy hop in TLS before tunnelling.
                self.sock = conn = self._connect_tls_proxy(hostname, conn)
                tls_in_tls = True

            # Calls self._set_hostport(), so self.host is
            # self._tunnel_host below.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0

            # Override the host with the one we're requesting data from.
            hostname = self._tunnel_host

        # SNI/verification hostname may be overridden by the caller.
        server_hostname = hostname
        if self.server_hostname is not None:
            server_hostname = self.server_hostname

        # A badly skewed clock makes certificate validity checks fail in
        # confusing ways; warn up front.
        is_time_off = datetime.date.today() < RECENT_DATE
        if is_time_off:
            warnings.warn(
                (
                    "System time is way off (before {0}). This will probably "
                    "lead to SSL verification errors"
                ).format(RECENT_DATE),
                SystemTimeWarning,
            )

        # Wrap socket using verification with the root certs in
        # trusted_root_certs
        default_ssl_context = False
        if self.ssl_context is None:
            default_ssl_context = True
            self.ssl_context = create_urllib3_context(
                ssl_version=resolve_ssl_version(self.ssl_version),
                cert_reqs=resolve_cert_reqs(self.cert_reqs),
            )

        context = self.ssl_context
        context.verify_mode = resolve_cert_reqs(self.cert_reqs)

        # Try to load OS default certs if none are given.
        # Works well on Windows (requires Python3.4+)
        if (
            not self.ca_certs
            and not self.ca_cert_dir
            and not self.ca_cert_data
            and default_ssl_context
            and hasattr(context, "load_default_certs")
        ):
            context.load_default_certs()

        # Step 2: TLS handshake over the (possibly tunnelled) socket.
        self.sock = ssl_wrap_socket(
            sock=conn,
            keyfile=self.key_file,
            certfile=self.cert_file,
            key_password=self.key_password,
            ca_certs=self.ca_certs,
            ca_cert_dir=self.ca_cert_dir,
            ca_cert_data=self.ca_cert_data,
            server_hostname=server_hostname,
            ssl_context=context,
            tls_in_tls=tls_in_tls,
        )

        # If we're using all defaults and the connection
        # is TLSv1 or TLSv1.1 we throw a DeprecationWarning
        # for the host.
        if (
            default_ssl_context
            and self.ssl_version is None
            and hasattr(self.sock, "version")
            and self.sock.version() in {"TLSv1", "TLSv1.1"}
        ):  # Defensive:
            warnings.warn(
                "Negotiating TLSv1/TLSv1.1 by default is deprecated "
                "and will be disabled in urllib3 v2.0.0. Connecting to "
                "'%s' with '%s' can be enabled by explicitly opting-in "
                "with 'ssl_version'" % (self.host, self.sock.version()),
                DeprecationWarning,
            )

        # Step 3: post-handshake peer verification (fingerprint or hostname).
        if self.assert_fingerprint:
            assert_fingerprint(
                self.sock.getpeercert(binary_form=True), self.assert_fingerprint
            )
        elif (
            context.verify_mode != ssl.CERT_NONE
            and not getattr(context, "check_hostname", False)
            and self.assert_hostname is not False
        ):
            # While urllib3 attempts to always turn off hostname matching from
            # the TLS library, this cannot always be done. So we check whether
            # the TLS Library still thinks it's matching hostnames.
            cert = self.sock.getpeercert()
            if not cert.get("subjectAltName", ()):
                warnings.warn(
                    (
                        "Certificate for {0} has no `subjectAltName`, falling back to check for a "
                        "`commonName` for now. This feature is being removed by major browsers and "
                        "deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 "
                        "for details.)".format(hostname)
                    ),
                    SubjectAltNameWarning,
                )
            _match_hostname(cert, self.assert_hostname or server_hostname)

        self.is_verified = (
            context.verify_mode == ssl.CERT_REQUIRED
            or self.assert_fingerprint is not None
        )

    def _connect_tls_proxy(self, hostname, conn):
        """
        Establish a TLS connection to the proxy using the provided SSL context.
        """
        proxy_config = self.proxy_config
        ssl_context = proxy_config.ssl_context
        if ssl_context:
            # If the user provided a proxy context, we assume CA and client
            # certificates have already been set
            return ssl_wrap_socket(
                sock=conn,
                server_hostname=hostname,
                ssl_context=ssl_context,
            )

        ssl_context = create_proxy_ssl_context(
            self.ssl_version,
            self.cert_reqs,
            self.ca_certs,
            self.ca_cert_dir,
            self.ca_cert_data,
        )

        # If no cert was provided, use only the default options for server
        # certificate validation
        # NOTE(review): local name `socket` shadows the imported socket module
        # for the rest of this method; the module is not used below, so this
        # is harmless here, but keep in mind when editing.
        socket = ssl_wrap_socket(
            sock=conn,
            ca_certs=self.ca_certs,
            ca_cert_dir=self.ca_cert_dir,
            ca_cert_data=self.ca_cert_data,
            server_hostname=hostname,
            ssl_context=ssl_context,
        )

        if ssl_context.verify_mode != ssl.CERT_NONE and not getattr(
            ssl_context, "check_hostname", False
        ):
            # While urllib3 attempts to always turn off hostname matching from
            # the TLS library, this cannot always be done. So we check whether
            # the TLS Library still thinks it's matching hostnames.
            cert = socket.getpeercert()
            if not cert.get("subjectAltName", ()):
                warnings.warn(
                    (
                        "Certificate for {0} has no `subjectAltName`, falling back to check for a "
                        "`commonName` for now. This feature is being removed by major browsers and "
                        "deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 "
                        "for details.)".format(hostname)
                    ),
                    SubjectAltNameWarning,
                )
            _match_hostname(cert, hostname)

        self.proxy_is_verified = ssl_context.verify_mode == ssl.CERT_REQUIRED
        return socket


def _match_hostname(cert, asserted_hostname):
    # Our upstream implementation of ssl.match_hostname()
    # only applies this normalization to IP addresses so it doesn't
    # match DNS SANs so we do the same thing!
    # NOTE(review): strip("u[]") removes IPv6 brackets and apparently a stray
    # "u" prefix artifact; the stripped form is only adopted when it parses as
    # an IP address, so DNS names are unaffected — confirm against upstream.
    stripped_hostname = asserted_hostname.strip("u[]")
    if is_ipaddress(stripped_hostname):
        asserted_hostname = stripped_hostname

    try:
        match_hostname(cert, asserted_hostname)
    except CertificateError as e:
        log.warning(
            "Certificate did not match expected hostname: %s. Certificate: %s",
            asserted_hostname,
            cert,
        )
        # Add cert to exception and reraise so client code can inspect
        # the cert when catching the exception, if they want to
        e._peer_cert = cert
        raise


def _get_default_user_agent():
    """Return the default User-Agent string, e.g. ``python-urllib3/1.26.x``."""
    return "python-urllib3/%s" % __version__
HTTPSConnection