Dataset columns (name, type, value range):

  language     stringclasses   1 value
  repo         stringclasses   346 values
  path         stringlengths   6 to 201
  class_span   dict
  source       stringlengths   21 to 2.38M
  target       stringlengths   1 to 96
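Each row below pairs a Python class definition whose name has been masked (the ____ placeholder in source) with the original name in target, plus the source repo, file path, and the character span the class occupies. A minimal sketch of consuming such rows with the Hugging Face datasets library follows; the dataset identifier "your-org/masked-class-names" is a hypothetical placeholder, not the real one.

# Hedged sketch: load one row and rebuild the class header by substituting the
# masked name back in. The dataset identifier is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("your-org/masked-class-names", split="train")  # hypothetical id
row = ds[0]
print(row["repo"], row["path"], row["class_span"])
reconstructed = row["source"].replace("____", row["target"], 1)
print(reconstructed.splitlines()[0])  # first line of the unmasked class definition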
python
great-expectations__great_expectations
great_expectations/datasource/fluent/data_connector/s3_data_connector.py
{ "start": 793, "end": 959 }
class ____(pydantic.BaseModel):
    s3_prefix: str = ""
    s3_delimiter: str = "/"
    s3_max_keys: int = 1000
    s3_recursive_file_discovery: bool = False
_S3Options
python
lazyprogrammer__machine_learning_examples
rnn_class/batch_parity.py
{ "start": 512, "end": 5864 }
class ____:
    def __init__(self, M):
        self.M = M  # hidden layer size

    def fit(self, X, Y, batch_sz=20, learning_rate=1.0, mu=0.99, reg=1.0, activation=T.tanh, epochs=100, show_fig=False):
        D = X[0].shape[1]  # X is of size N x T(n) x D
        K = len(set(Y.flatten()))
        N = len(Y)
        M = self.M
        self.f = activation

        # initial weights
        Wx = init_weight(D, M)
        Wh = init_weight(M, M)
        bh = np.zeros(M)
        h0 = np.zeros(M)
        Wo = init_weight(M, K)
        bo = np.zeros(K)

        # make them theano shared
        self.Wx = theano.shared(Wx)
        self.Wh = theano.shared(Wh)
        self.bh = theano.shared(bh)
        self.h0 = theano.shared(h0)
        self.Wo = theano.shared(Wo)
        self.bo = theano.shared(bo)
        self.params = [self.Wx, self.Wh, self.bh, self.h0, self.Wo, self.bo]

        thX = T.fmatrix('X')  # will represent multiple batches concatenated
        thY = T.ivector('Y')
        thStartPoints = T.ivector('start_points')

        XW = thX.dot(self.Wx)

        # startPoints will contain 1 where a sequence starts and 0 otherwise
        # Ex. if I have 3 sequences: [[1,2,3], [4,5], [6,7,8]]
        # Then I will concatenate these into one X: [1,2,3,4,5,6,7,8]
        # And startPoints will be [1,0,0,1,0,1,0,0]

        # One possible solution: loop through index
        # def recurrence(t, h_t1, XW, h0, startPoints):
        #     # returns h(t)
        #     # if at a boundary, state should be h0
        #     h_t = T.switch(
        #         T.eq(startPoints[t], 1),
        #         self.f(XW[t] + h0.dot(self.Wh) + self.bh),
        #         self.f(XW[t] + h_t1.dot(self.Wh) + self.bh)
        #     )
        #     return h_t
        # h, _ = theano.scan(
        #     fn=recurrence,
        #     outputs_info=[self.h0],
        #     sequences=T.arange(XW.shape[0]),
        #     non_sequences=[XW, self.h0, thStartPoints],
        #     n_steps=XW.shape[0],
        # )

        # other solution - loop through all sequences simultaneously
        def recurrence(xw_t, is_start, h_t1, h0):
            # if at a boundary, state should be h0
            h_t = T.switch(
                T.eq(is_start, 1),
                self.f(xw_t + h0.dot(self.Wh) + self.bh),
                self.f(xw_t + h_t1.dot(self.Wh) + self.bh)
            )
            return h_t

        h, _ = theano.scan(
            fn=recurrence,
            outputs_info=[self.h0],
            sequences=[XW, thStartPoints],
            non_sequences=[self.h0],
            n_steps=XW.shape[0],
        )

        # h is of shape (T*batch_sz, M)
        py_x = T.nnet.softmax(h.dot(self.Wo) + self.bo)
        prediction = T.argmax(py_x, axis=1)

        cost = -T.mean(T.log(py_x[T.arange(thY.shape[0]), thY]))
        grads = T.grad(cost, self.params)
        dparams = [theano.shared(p.get_value()*0) for p in self.params]

        updates = [
            (p, p + mu*dp - learning_rate*g) for p, dp, g in zip(self.params, dparams, grads)
        ] + [
            (dp, mu*dp - learning_rate*g) for dp, g in zip(dparams, grads)
        ]

        # self.predict_op = theano.function(inputs=[thX, thStartPoints], outputs=prediction)
        self.train_op = theano.function(
            inputs=[thX, thY, thStartPoints],
            outputs=[cost, prediction, py_x],
            updates=updates
        )

        costs = []
        n_batches = N // batch_sz
        sequenceLength = X.shape[1]
        # if each sequence was of variable length, we would need to
        # initialize this inside the loop for every new batch
        startPoints = np.zeros(sequenceLength*batch_sz, dtype=np.int32)
        for b in range(batch_sz):
            startPoints[b*sequenceLength] = 1
        for i in range(epochs):
            X, Y = shuffle(X, Y)
            n_correct = 0
            cost = 0
            for j in range(n_batches):
                Xbatch = X[j*batch_sz:(j+1)*batch_sz].reshape(sequenceLength*batch_sz, D)
                Ybatch = Y[j*batch_sz:(j+1)*batch_sz].reshape(sequenceLength*batch_sz).astype(np.int32)
                c, p, rout = self.train_op(Xbatch, Ybatch, startPoints)
                # print "p:", p
                cost += c

                # P = p.reshape(batch_sz, sequenceLength)
                for b in range(batch_sz):
                    idx = sequenceLength*(b + 1) - 1
                    if p[idx] == Ybatch[idx]:
                        n_correct += 1
                    # else:
                    #     print "pred:", p[idx], "actual:", Ybatch[idx]
            if i % 10 == 0:
                print("shape y:", rout.shape)
                print("i:", i, "cost:", cost, "classification rate:", (float(n_correct)/N))
            if n_correct == N:
                print("i:", i, "cost:", cost, "classification rate:", (float(n_correct)/N))
                break
            costs.append(cost)

        if show_fig:
            plt.plot(costs)
            plt.show()


def parity(B=12, learning_rate=1e-3, epochs=3000):
    X, Y = all_parity_pairs_with_sequence_labels(B)
    rnn = SimpleRNN(4)
    rnn.fit(X, Y,
        batch_sz=10,
        learning_rate=learning_rate,
        epochs=epochs,
        activation=T.nnet.sigmoid,
        show_fig=False
    )


if __name__ == '__main__':
    parity()
SimpleRNN
python
Pylons__pyramid
tests/test_request.py
{ "start": 156, "end": 12385 }
class ____(unittest.TestCase): def setUp(self): self.config = testing.setUp() def tearDown(self): testing.tearDown() def _getTargetClass(self): from pyramid.request import Request return Request def _makeOne(self, environ=None): if environ is None: environ = {} return self._getTargetClass()(environ) def _registerResourceURL(self): from zope.interface import Interface from pyramid.interfaces import IResourceURL class DummyResourceURL: def __init__(self, context, request): self.physical_path = '/context/' self.virtual_path = '/context/' self.config.registry.registerAdapter( DummyResourceURL, (Interface, Interface), IResourceURL ) def test_class_conforms_to_IRequest(self): from zope.interface.verify import verifyClass from pyramid.interfaces import IRequest verifyClass(IRequest, self._getTargetClass()) def test_instance_conforms_to_IRequest(self): from zope.interface.verify import verifyObject from pyramid.interfaces import IRequest verifyObject(IRequest, self._makeOne()) def test_ResponseClass_is_pyramid_Response(self): from pyramid.response import Response cls = self._getTargetClass() self.assertEqual(cls.ResponseClass, Response) def test_implements_security_apis(self): apis = (SecurityAPIMixin, AuthenticationAPIMixin) r = self._makeOne() self.assertTrue(isinstance(r, apis)) def test_charset_defaults_to_utf8(self): r = self._makeOne({'PATH_INFO': '/'}) self.assertEqual(r.charset, 'UTF-8') def test_exception_defaults_to_None(self): r = self._makeOne({'PATH_INFO': '/'}) self.assertEqual(r.exception, None) def test_matchdict_defaults_to_None(self): r = self._makeOne({'PATH_INFO': '/'}) self.assertEqual(r.matchdict, None) def test_matched_route_defaults_to_None(self): r = self._makeOne({'PATH_INFO': '/'}) self.assertEqual(r.matched_route, None) def test_params_decoded_from_utf_8_by_default(self): environ = {'PATH_INFO': '/', 'QUERY_STRING': 'la=La%20Pe%C3%B1a'} request = self._makeOne(environ) request.charset = None self.assertEqual(request.GET['la'], text_(b'La Pe\xf1a')) def test_tmpl_context(self): from pyramid.request import TemplateContext inst = self._makeOne() result = inst.tmpl_context self.assertEqual(result.__class__, TemplateContext) def test_session_configured(self): from pyramid.interfaces import ISessionFactory inst = self._makeOne() def factory(request): return 'orangejuice' self.config.registry.registerUtility(factory, ISessionFactory) inst.registry = self.config.registry self.assertEqual(inst.session, 'orangejuice') self.assertEqual(inst.__dict__['session'], 'orangejuice') def test_session_not_configured(self): inst = self._makeOne() inst.registry = self.config.registry self.assertRaises(AttributeError, getattr, inst, 'session') def test_setattr_and_getattr_dotnotation(self): inst = self._makeOne() inst.foo = 1 self.assertEqual(inst.foo, 1) def test_setattr_and_getattr(self): environ = {} inst = self._makeOne(environ) setattr(inst, 'bar', 1) self.assertEqual(getattr(inst, 'bar'), 1) self.assertEqual(environ, {}) # make sure we're not using adhoc attrs def test_add_response_callback(self): inst = self._makeOne() self.assertEqual(len(inst.response_callbacks), 0) def callback(request, response): """ """ inst.add_response_callback(callback) self.assertEqual(list(inst.response_callbacks), [callback]) inst.add_response_callback(callback) self.assertEqual(list(inst.response_callbacks), [callback, callback]) def test__process_response_callbacks(self): inst = self._makeOne() def callback1(request, response): request.called1 = True response.called1 = True def callback2(request, 
response): request.called2 = True response.called2 = True inst.add_response_callback(callback1) inst.add_response_callback(callback2) response = DummyResponse() inst._process_response_callbacks(response) self.assertEqual(inst.called1, True) self.assertEqual(inst.called2, True) self.assertEqual(response.called1, True) self.assertEqual(response.called2, True) self.assertEqual(len(inst.response_callbacks), 0) def test__process_response_callback_adding_response_callback(self): """ When a response callback adds another callback, that new callback should still be called. See https://github.com/Pylons/pyramid/pull/1373 """ inst = self._makeOne() def callback1(request, response): request.called1 = True response.called1 = True request.add_response_callback(callback2) def callback2(request, response): request.called2 = True response.called2 = True inst.add_response_callback(callback1) response = DummyResponse() inst._process_response_callbacks(response) self.assertEqual(inst.called1, True) self.assertEqual(inst.called2, True) self.assertEqual(response.called1, True) self.assertEqual(response.called2, True) self.assertEqual(len(inst.response_callbacks), 0) def test_add_finished_callback(self): inst = self._makeOne() self.assertEqual(len(inst.finished_callbacks), 0) def callback(request): """ """ inst.add_finished_callback(callback) self.assertEqual(list(inst.finished_callbacks), [callback]) inst.add_finished_callback(callback) self.assertEqual(list(inst.finished_callbacks), [callback, callback]) def test__process_finished_callbacks(self): inst = self._makeOne() def callback1(request): request.called1 = True def callback2(request): request.called2 = True inst.add_finished_callback(callback1) inst.add_finished_callback(callback2) inst._process_finished_callbacks() self.assertEqual(inst.called1, True) self.assertEqual(inst.called2, True) self.assertEqual(len(inst.finished_callbacks), 0) def test_resource_url(self): self._registerResourceURL() environ = { 'PATH_INFO': '/', 'SERVER_NAME': 'example.com', 'SERVER_PORT': '80', 'wsgi.url_scheme': 'http', } inst = self._makeOne(environ) root = DummyContext() result = inst.resource_url(root) self.assertEqual(result, 'http://example.com/context/') def test_route_url(self): environ = { 'PATH_INFO': '/', 'SERVER_NAME': 'example.com', 'SERVER_PORT': '5432', 'QUERY_STRING': 'la=La%20Pe%C3%B1a', 'wsgi.url_scheme': 'http', } from pyramid.interfaces import IRoutesMapper inst = self._makeOne(environ) mapper = DummyRoutesMapper(route=DummyRoute('/1/2/3')) self.config.registry.registerUtility(mapper, IRoutesMapper) result = inst.route_url( 'flub', 'extra1', 'extra2', a=1, b=2, c=3, _query={'a': 1}, _anchor=text_("foo"), ) self.assertEqual( result, 'http://example.com:5432/1/2/3/extra1/extra2?a=1#foo' ) def test_route_path(self): environ = { 'PATH_INFO': '/', 'SERVER_NAME': 'example.com', 'SERVER_PORT': '5432', 'QUERY_STRING': 'la=La%20Pe%C3%B1a', 'wsgi.url_scheme': 'http', } from pyramid.interfaces import IRoutesMapper inst = self._makeOne(environ) mapper = DummyRoutesMapper(route=DummyRoute('/1/2/3')) self.config.registry.registerUtility(mapper, IRoutesMapper) result = inst.route_path( 'flub', 'extra1', 'extra2', a=1, b=2, c=3, _query={'a': 1}, _anchor=text_("foo"), ) self.assertEqual(result, '/1/2/3/extra1/extra2?a=1#foo') def test_static_url(self): from pyramid.interfaces import IStaticURLInfo environ = { 'PATH_INFO': '/', 'SERVER_NAME': 'example.com', 'SERVER_PORT': '5432', 'QUERY_STRING': '', 'wsgi.url_scheme': 'http', } request = self._makeOne(environ) info = 
DummyStaticURLInfo('abc') self.config.registry.registerUtility(info, IStaticURLInfo) result = request.static_url('pyramid.tests:static/foo.css') self.assertEqual(result, 'abc') self.assertEqual( info.args, ('pyramid.tests:static/foo.css', request, {}) ) def test_is_response_false(self): request = self._makeOne() request.registry = self.config.registry self.assertEqual(request.is_response('abc'), False) def test_is_response_true_ob_is_pyramid_response(self): from pyramid.response import Response r = Response('hello') request = self._makeOne() request.registry = self.config.registry self.assertEqual(request.is_response(r), True) def test_is_response_false_adapter_is_not_self(self): from pyramid.interfaces import IResponse request = self._makeOne() request.registry = self.config.registry def adapter(ob): return object() class Foo: pass foo = Foo() request.registry.registerAdapter(adapter, (Foo,), IResponse) self.assertEqual(request.is_response(foo), False) def test_is_response_adapter_true(self): from pyramid.interfaces import IResponse request = self._makeOne() request.registry = self.config.registry class Foo: pass foo = Foo() def adapter(ob): return ob request.registry.registerAdapter(adapter, (Foo,), IResponse) self.assertEqual(request.is_response(foo), True) def test_json_body_invalid_json(self): request = self._makeOne({'REQUEST_METHOD': 'POST'}) request.body = b'{' self.assertRaises(ValueError, getattr, request, 'json_body') def test_json_body_valid_json(self): request = self._makeOne({'REQUEST_METHOD': 'POST'}) request.body = b'{"a":1}' self.assertEqual(request.json_body, {'a': 1}) def test_json_body_alternate_charset(self): import json request = self._makeOne({'REQUEST_METHOD': 'POST'}) inp = text_( b'/\xe6\xb5\x81\xe8\xa1\x8c\xe8\xb6\x8b\xe5\x8a\xbf', 'utf-8' ) body = bytes(json.dumps({'a': inp}), 'utf-16') request.body = body request.content_type = 'application/json; charset=utf-16' self.assertEqual(request.json_body, {'a': inp}) def test_json_body_GET_request(self): request = self._makeOne({'REQUEST_METHOD': 'GET'}) self.assertRaises(ValueError, getattr, request, 'json_body') def test_set_property(self): request = self._makeOne() opts = [2, 1] def connect(obj): return opts.pop() request.set_property(connect, name='db') self.assertEqual(1, request.db) self.assertEqual(2, request.db) def test_set_property_reify(self): request = self._makeOne() opts = [2, 1] def connect(obj): return opts.pop() request.set_property(connect, name='db', reify=True) self.assertEqual(1, request.db) self.assertEqual(1, request.db)
TestRequest
python
getsentry__sentry
src/sentry/replays/lib/event_linking.py
{ "start": 566, "end": 675 }
class ____(TypedDict):
    type: str
    replay_id: str
    timestamp: int
    event_hash: str
EventLinkPayload
python
run-llama__llama_index
llama-index-integrations/embeddings/llama-index-embeddings-autoembeddings/llama_index/embeddings/autoembeddings/base.py
{ "start": 349, "end": 1817 }
class ____(BaseEmbedding):
    """
    Autoembeddings from chonkie.

    Args:
        model_name (str): The name of the model to use.
    """

    model_name: str
    embedder: Optional[chonkie.BaseEmbeddings] = None

    def __init__(self, model_name: str) -> None:
        super().__init__(model_name=model_name)
        self.embedder = AutoEmbeddings.get_embeddings(self.model_name)

    @classmethod
    def class_name(cls) -> str:
        return "ChonkieAutoEmbedding"

    def _get_embedding(self, text: str) -> List[float]:
        embed = self.embedder.embed(text)
        return embed.tolist()

    async def _aget_embedding(self, text: str) -> List[float]:
        return self._get_embedding(text)

    def _get_embeddings(self, texts: List[str]) -> List[List[float]]:
        embeds = self.embedder.embed_batch(texts)
        return [e.tolist() for e in embeds]

    async def _aget_embeddings(
        self,
        texts: List[str],
    ) -> List[List[float]]:
        return self._get_embeddings(texts)

    def _get_query_embedding(self, query: str) -> List[float]:
        """Get query embedding."""
        return self._get_embedding(query)

    async def _aget_query_embedding(self, query: str) -> List[float]:
        """Get query embedding."""
        return await self._aget_embedding(query)

    def _get_text_embedding(self, text: str) -> List[float]:
        """Get text embedding."""
        return self._get_embedding(text)
ChonkieAutoEmbedding
python
pydata__xarray
asv_bench/benchmarks/pandas.py
{ "start": 106, "end": 735 }
class ____:
    def setup(self, dtype, subset):
        data = np.random.rand(100000).astype(dtype)
        index = pd.MultiIndex.from_product(
            [
                list("abcdefhijk"),
                list("abcdefhijk"),
                pd.date_range(start="2000-01-01", periods=1000, freq="D"),
            ]
        )
        series = pd.Series(data, index)
        if subset:
            series = series[::3]
        self.series = series

    @parameterized(["dtype", "subset"], ([int, float], [True, False]))
    def time_from_series(self, dtype, subset):
        xr.DataArray.from_series(self.series)
MultiIndexSeries
python
tensorflow__tensorflow
tensorflow/python/data/experimental/kernel_tests/index_shuffle_test.py
{ "start": 6687, "end": 8679 }
class ____(checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):

  def _build_dataset(
      self,
      num_elements_per_file,
      num_files,
      num_epochs,
      seed=None,
      reshuffle_each_iteration=None,
      symbolic_checkpoint=None,
  ):
    file_infos = []
    for _ in range(num_files):
      file_infos.append({
          "path": "unused",
          "num_elements": num_elements_per_file,
      })

    def reader_factory(files):
      return dataset_ops.Dataset.range(
          num_elements_per_file *
          array_ops.shape(files, out_type=dtypes.int64)[0])

    dataset = shuffle_ops.index_shuffle(
        file_infos,
        reader_factory,
        seed=seed,
        reshuffle_each_iteration=reshuffle_each_iteration)
    dataset = dataset.repeat(num_epochs)
    if symbolic_checkpoint:
      options = options_lib.Options()
      options.experimental_symbolic_checkpoint = symbolic_checkpoint
      dataset = dataset.with_options(options)
    return dataset

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          checkpoint_test_base.default_test_combinations(),
          combinations.combine(
              symbolic_checkpoint=[False, True],
              reshuffle_each_iteration=[False, True])))
  def test(self, verify_fn, symbolic_checkpoint, reshuffle_each_iteration):
    seed = 42
    num_elements_per_file = 8
    num_files = 3
    num_epochs = 2
    num_outputs = num_elements_per_file * num_files * num_epochs
    # pylint: disable=g-long-lambda
    verify_fn(
        self,
        lambda: self._build_dataset(
            num_elements_per_file=num_elements_per_file,
            num_files=num_files,
            num_epochs=num_epochs,
            seed=seed,
            reshuffle_each_iteration=reshuffle_each_iteration,
            symbolic_checkpoint=symbolic_checkpoint),
        num_outputs)


if __name__ == "__main__":
  test.main()
IndexShuffleCheckpointTest
python
celery__celery
t/unit/worker/test_autoscale.py
{ "start": 221, "end": 815 }
class ____(BasePool):
    shrink_raises_exception = False
    shrink_raises_ValueError = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._pool = Bunch(_processes=self.limit)

    def grow(self, n=1):
        self._pool._processes += n

    def shrink(self, n=1):
        if self.shrink_raises_exception:
            raise KeyError('foo')
        if self.shrink_raises_ValueError:
            raise ValueError('foo')
        self._pool._processes -= n

    @property
    def num_processes(self):
        return self._pool._processes
MockPool
python
great-expectations__great_expectations
contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_minnesota_zip.py
{ "start": 752, "end": 1759 }
class ____(ColumnMapMetricProvider):
    # This is the id string that will be used to reference your metric.
    condition_metric_name = "column_values.valid_minnesota_zip"

    # This method implements the core logic for the PandasExecutionEngine
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        return column.apply(lambda x: is_valid_minnesota_zip(x))

    # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     raise NotImplementedError

    # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
    # @column_condition_partial(engine=SparkDFExecutionEngine)
    # def _spark(cls, column, **kwargs):
    #     raise NotImplementedError


# This class defines the Expectation itself
ColumnValuesToBeValidMinnesotaZip
python
run-llama__llama_index
llama-index-core/llama_index/core/memory/types.py
{ "start": 2412, "end": 5034 }
class ____(BaseMemory):
    """Base class for storing multi-tenant chat history."""

    chat_store: SerializeAsAny[BaseChatStore] = Field(default_factory=SimpleChatStore)
    chat_store_key: str = Field(default=DEFAULT_CHAT_STORE_KEY)

    @field_serializer("chat_store")
    def serialize_courses_in_order(self, chat_store: BaseChatStore) -> dict:
        res = chat_store.model_dump()
        res.update({"class_name": chat_store.class_name()})
        return res

    @classmethod
    def class_name(cls) -> str:
        """Get class name."""
        return "BaseChatStoreMemory"

    @classmethod
    @abstractmethod
    def from_defaults(
        cls,
        chat_history: Optional[List[ChatMessage]] = None,
        llm: Optional[LLM] = None,
        **kwargs: Any,
    ) -> "BaseChatStoreMemory":
        """Create a chat memory from defaults."""

    def get_all(self) -> List[ChatMessage]:
        """Get all chat history."""
        return self.chat_store.get_messages(self.chat_store_key)

    async def aget_all(self) -> List[ChatMessage]:
        """Get all chat history."""
        return await self.chat_store.aget_messages(self.chat_store_key)

    def get(self, input: Optional[str] = None, **kwargs: Any) -> List[ChatMessage]:
        """Get chat history."""
        return self.chat_store.get_messages(self.chat_store_key, **kwargs)

    async def aget(
        self, input: Optional[str] = None, **kwargs: Any
    ) -> List[ChatMessage]:
        """Get chat history."""
        return await self.chat_store.aget_messages(self.chat_store_key, **kwargs)

    def put(self, message: ChatMessage) -> None:
        """Put chat history."""
        # ensure everything is serialized
        self.chat_store.add_message(self.chat_store_key, message)

    async def aput(self, message: ChatMessage) -> None:
        """Put chat history."""
        # ensure everything is serialized
        await self.chat_store.async_add_message(self.chat_store_key, message)

    def set(self, messages: List[ChatMessage]) -> None:
        """Set chat history."""
        self.chat_store.set_messages(self.chat_store_key, messages)

    async def aset(self, messages: List[ChatMessage]) -> None:
        """Set chat history."""
        # ensure everything is serialized
        await self.chat_store.aset_messages(self.chat_store_key, messages)

    def reset(self) -> None:
        """Reset chat history."""
        self.chat_store.delete_messages(self.chat_store_key)

    async def areset(self) -> None:
        """Reset chat history."""
        await self.chat_store.adelete_messages(self.chat_store_key)
BaseChatStoreMemory
python
scipy__scipy
scipy/linalg/tests/test_decomp_update.py
{ "start": 2725, "end": 3342 }
class ____:
    def setup_method(self):
        self.rtol = 10.0 ** -(np.finfo(self.dtype).precision-2)
        self.atol = 10 * np.finfo(self.dtype).eps

    def generate(self, type, mode='full'):
        rng = np.random.default_rng(29382)
        shape = {'sqr': (8, 8), 'tall': (12, 7), 'fat': (7, 12),
                 'Mx1': (8, 1), '1xN': (1, 8), '1x1': (1, 1)}[type]
        a = rng.random(shape)
        if np.iscomplexobj(self.dtype.type(1)):
            b = rng.random(shape)
            a = a + 1j * b
        a = a.astype(self.dtype)
        q, r = linalg.qr(a, mode=mode)
        return a, q, r
BaseQRdeltas
python
crytic__slither
slither/tools/upgradeability/checks/variables_order.py
{ "start": 300, "end": 1849 }
class ____(AbstractCheck):
    ARGUMENT = "missing-variables"
    IMPACT = CheckClassification.MEDIUM

    HELP = "Variable missing in the v2"
    WIKI = "https://github.com/crytic/slither/wiki/Upgradeability-Checks#missing-variables"
    WIKI_TITLE = "Missing variables"

    # region wiki_description
    WIKI_DESCRIPTION = """
Detect variables that were present in the original contracts but are not in the updated one.
"""
    # endregion wiki_description

    # region wiki_exploit_scenario
    WIKI_EXPLOIT_SCENARIO = """
```solidity
contract V1{
    uint variable1;
    uint variable2;
}
contract V2{
    uint variable1;
}
```
The new version, `V2` does not contain `variable1`.
If a new variable is added in an update of `V2`, this variable will hold the latest value of `variable2` and will be corrupted.
"""
    # endregion wiki_exploit_scenario

    # region wiki_recommendation
    WIKI_RECOMMENDATION = """
Do not change the order of the state variables in the updated contract.
"""
    # endregion wiki_recommendation

    REQUIRE_CONTRACT = True
    REQUIRE_CONTRACT_V2 = True

    def _check(self) -> List[Output]:
        contract1 = self.contract
        contract2 = self.contract_v2
        assert contract2
        missing = get_missing_vars(contract1, contract2)
        results = []
        for variable1 in missing:
            info: CHECK_INFO = ["Variable missing in ", contract2, ": ", variable1, "\n"]
            json = self.generate_result(info)
            results.append(json)
        return results
MissingVariable
python
numpy__numpy
tools/swig/test/testFlat.py
{ "start": 2883, "end": 3144 }
class ____(FlatTestCase):
    def __init__(self, methodName="runTest"):
        FlatTestCase.__init__(self, methodName)
        self.typeStr = "uchar"
        self.typeCode = "B"

######################################################################
ucharTestCase
python
fastai__fastai
fastai/layers.py
{ "start": 12164, "end": 12941 }
class ____(Module):
    "Self attention layer for `n_channels`."
    def __init__(self, n_channels):
        self.query,self.key,self.value = [self._conv(n_channels, c) for c in (n_channels//8,n_channels//8,n_channels)]
        self.gamma = nn.Parameter(tensor([0.]))

    def _conv(self,n_in,n_out):
        return ConvLayer(n_in, n_out, ks=1, ndim=1, norm_type=NormType.Spectral, act_cls=None, bias=False)

    def forward(self, x):
        #Notation from the paper.
        size = x.size()
        x = x.view(*size[:2],-1)
        f,g,h = self.query(x),self.key(x),self.value(x)
        beta = F.softmax(torch.bmm(f.transpose(1,2), g), dim=1)
        o = self.gamma * torch.bmm(h, beta) + x
        return o.view(*size).contiguous()

# %% ../nbs/01_layers.ipynb 95
SelfAttention
python
python__mypy
mypy/typeanal.py
{ "start": 4576, "end": 89288 }
class ____(SyntheticTypeVisitor[Type], TypeAnalyzerPluginInterface): """Semantic analyzer for types. Converts unbound types into bound types. This is a no-op for already bound types. If an incomplete reference is encountered, this does a defer. The caller never needs to defer. """ # Is this called from an untyped function definition? in_dynamic_func: bool = False # Is this called from global scope? global_scope: bool = True def __init__( self, api: SemanticAnalyzerCoreInterface, tvar_scope: TypeVarLikeScope, plugin: Plugin, options: Options, cur_mod_node: MypyFile, is_typeshed_stub: bool, *, defining_alias: bool = False, python_3_12_type_alias: bool = False, allow_tuple_literal: bool = False, allow_unbound_tvars: bool = False, allow_placeholder: bool = False, allow_typed_dict_special_forms: bool = False, allow_final: bool = True, allow_param_spec_literals: bool = False, allow_unpack: bool = False, report_invalid_types: bool = True, prohibit_self_type: str | None = None, prohibit_special_class_field_types: str | None = None, allowed_alias_tvars: list[TypeVarLikeType] | None = None, allow_type_any: bool = False, alias_type_params_names: list[str] | None = None, ) -> None: self.api = api self.fail_func = api.fail self.note_func = api.note self.tvar_scope = tvar_scope # Are we analysing a type alias definition rvalue? self.defining_alias = defining_alias self.python_3_12_type_alias = python_3_12_type_alias self.allow_tuple_literal = allow_tuple_literal # Positive if we are analyzing arguments of another (outer) type self.nesting_level = 0 # Should we allow new type syntax when targeting older Python versions # like 'list[int]' or 'X | Y' (allowed in stubs and with `__future__` import)? self.always_allow_new_syntax = self.api.is_stub_file or self.api.is_future_flag_set( "annotations" ) # Should we accept unbound type variables? This is currently used for class bases, # and alias right hand sides (before they are analyzed as type aliases). self.allow_unbound_tvars = allow_unbound_tvars if allowed_alias_tvars is None: allowed_alias_tvars = [] self.allowed_alias_tvars = allowed_alias_tvars self.alias_type_params_names = alias_type_params_names # If false, record incomplete ref if we generate PlaceholderType. self.allow_placeholder = allow_placeholder # Are we in a context where Required[] is allowed? self.allow_typed_dict_special_forms = allow_typed_dict_special_forms # Set True when we analyze ClassVar else False self.allow_final = allow_final # Are we in a context where ParamSpec literals are allowed? self.allow_param_spec_literals = allow_param_spec_literals # Are we in context where literal "..." specifically is allowed? self.allow_ellipsis = False # Should we report an error whenever we encounter a RawExpressionType outside # of a Literal context: e.g. whenever we encounter an invalid type? Normally, # we want to report an error, but the caller may want to do more specialized # error handling. self.report_invalid_types = report_invalid_types self.plugin = plugin self.options = options self.cur_mod_node = cur_mod_node self.is_typeshed_stub = is_typeshed_stub # Names of type aliases encountered while analysing a type will be collected here. self.aliases_used: set[str] = set() self.prohibit_self_type = prohibit_self_type # Set when we analyze TypedDicts or NamedTuples, since they are special: self.prohibit_special_class_field_types = prohibit_special_class_field_types # Allow variables typed as Type[Any] and type (useful for base classes). 
self.allow_type_any = allow_type_any self.allow_type_var_tuple = False self.allow_unpack = allow_unpack def lookup_qualified( self, name: str, ctx: Context, suppress_errors: bool = False ) -> SymbolTableNode | None: return self.api.lookup_qualified(name, ctx, suppress_errors) def lookup_fully_qualified(self, fullname: str) -> SymbolTableNode: return self.api.lookup_fully_qualified(fullname) def visit_unbound_type(self, t: UnboundType, defining_literal: bool = False) -> Type: typ = self.visit_unbound_type_nonoptional(t, defining_literal) if t.optional: # We don't need to worry about double-wrapping Optionals or # wrapping Anys: Union simplification will take care of that. return make_optional_type(typ) return typ def not_declared_in_type_params(self, tvar_name: str) -> bool: return ( self.alias_type_params_names is not None and tvar_name not in self.alias_type_params_names ) def visit_unbound_type_nonoptional(self, t: UnboundType, defining_literal: bool) -> Type: sym = self.lookup_qualified(t.name, t) param_spec_name = None if t.name.endswith((".args", ".kwargs")): param_spec_name = t.name.rsplit(".", 1)[0] maybe_param_spec = self.lookup_qualified(param_spec_name, t) if maybe_param_spec and isinstance(maybe_param_spec.node, ParamSpecExpr): sym = maybe_param_spec else: param_spec_name = None if sym is not None: node = sym.node if isinstance(node, PlaceholderNode): if node.becomes_typeinfo: # Reference to placeholder type. if self.api.final_iteration: self.cannot_resolve_type(t) return AnyType(TypeOfAny.from_error) elif self.allow_placeholder: self.api.defer() else: self.api.record_incomplete_ref() # Always allow ParamSpec for placeholders, if they are actually not valid, # they will be reported later, after we resolve placeholders. return PlaceholderType( node.fullname, self.anal_array( t.args, allow_param_spec=True, allow_param_spec_literals=True, allow_unpack=True, ), t.line, ) else: if self.api.final_iteration: self.cannot_resolve_type(t) return AnyType(TypeOfAny.from_error) else: # Reference to an unknown placeholder node. 
self.api.record_incomplete_ref() return AnyType(TypeOfAny.special_form) if node is None: self.fail(f"Internal error (node is None, kind={sym.kind})", t) return AnyType(TypeOfAny.special_form) fullname = node.fullname hook = self.plugin.get_type_analyze_hook(fullname) if hook is not None: return hook(AnalyzeTypeContext(t, t, self)) tvar_def = self.tvar_scope.get_binding(sym) if isinstance(sym.node, ParamSpecExpr): if tvar_def is None: if self.allow_unbound_tvars: return t name = param_spec_name or t.name if self.defining_alias and self.not_declared_in_type_params(t.name): msg = f'ParamSpec "{name}" is not included in type_params' else: msg = f'ParamSpec "{name}" is unbound' self.fail(msg, t, code=codes.VALID_TYPE) return AnyType(TypeOfAny.from_error) assert isinstance(tvar_def, ParamSpecType) if len(t.args) > 0: self.fail( f'ParamSpec "{t.name}" used with arguments', t, code=codes.VALID_TYPE ) if param_spec_name is not None and not self.allow_param_spec_literals: self.fail( "ParamSpec components are not allowed here", t, code=codes.VALID_TYPE ) return AnyType(TypeOfAny.from_error) # Change the line number return ParamSpecType( tvar_def.name, tvar_def.fullname, tvar_def.id, tvar_def.flavor, tvar_def.upper_bound, tvar_def.default, line=t.line, column=t.column, ) if ( isinstance(sym.node, TypeVarExpr) and self.defining_alias and not defining_literal and (tvar_def is None or tvar_def not in self.allowed_alias_tvars) ): if self.not_declared_in_type_params(t.name): if self.python_3_12_type_alias: msg = message_registry.TYPE_PARAMETERS_SHOULD_BE_DECLARED.format( f'"{t.name}"' ) else: msg = f'Type variable "{t.name}" is not included in type_params' else: msg = f'Can\'t use bound type variable "{t.name}" to define generic alias' self.fail(msg, t, code=codes.VALID_TYPE) return AnyType(TypeOfAny.from_error) if isinstance(sym.node, TypeVarExpr) and tvar_def is not None: assert isinstance(tvar_def, TypeVarType) if len(t.args) > 0: self.fail( f'Type variable "{t.name}" used with arguments', t, code=codes.VALID_TYPE ) # Change the line number return tvar_def.copy_modified(line=t.line, column=t.column) if isinstance(sym.node, TypeVarTupleExpr) and ( tvar_def is not None and self.defining_alias and tvar_def not in self.allowed_alias_tvars ): if self.not_declared_in_type_params(t.name): msg = f'Type variable "{t.name}" is not included in type_params' else: msg = f'Can\'t use bound type variable "{t.name}" to define generic alias' self.fail(msg, t, code=codes.VALID_TYPE) return AnyType(TypeOfAny.from_error) if isinstance(sym.node, TypeVarTupleExpr): if tvar_def is None: if self.allow_unbound_tvars: return t if self.defining_alias and self.not_declared_in_type_params(t.name): if self.python_3_12_type_alias: msg = message_registry.TYPE_PARAMETERS_SHOULD_BE_DECLARED.format( f'"{t.name}"' ) else: msg = f'TypeVarTuple "{t.name}" is not included in type_params' else: msg = f'TypeVarTuple "{t.name}" is unbound' self.fail(msg, t, code=codes.VALID_TYPE) return AnyType(TypeOfAny.from_error) assert isinstance(tvar_def, TypeVarTupleType) if not self.allow_type_var_tuple: self.fail( f'TypeVarTuple "{t.name}" is only valid with an unpack', t, code=codes.VALID_TYPE, ) return AnyType(TypeOfAny.from_error) if len(t.args) > 0: self.fail( f'Type variable "{t.name}" used with arguments', t, code=codes.VALID_TYPE ) # Change the line number return TypeVarTupleType( tvar_def.name, tvar_def.fullname, tvar_def.id, tvar_def.upper_bound, sym.node.tuple_fallback, tvar_def.default, line=t.line, column=t.column, ) special = 
self.try_analyze_special_unbound_type(t, fullname) if special is not None: return special if isinstance(node, TypeAlias): self.aliases_used.add(fullname) an_args = self.anal_array( t.args, allow_param_spec=True, allow_param_spec_literals=node.has_param_spec_type, allow_unpack=True, # Fixed length unpacks can be used for non-variadic aliases. ) if node.has_param_spec_type and len(node.alias_tvars) == 1: an_args = self.pack_paramspec_args(an_args) disallow_any = self.options.disallow_any_generics and not self.is_typeshed_stub res = instantiate_type_alias( node, an_args, self.fail, node.no_args, t, self.options, unexpanded_type=t, disallow_any=disallow_any, empty_tuple_index=t.empty_tuple_index, ) # The only case where instantiate_type_alias() can return an incorrect instance is # when it is top-level instance, so no need to recurse. if ( isinstance(res, ProperType) and isinstance(res, Instance) and not (self.defining_alias and self.nesting_level == 0) and not validate_instance(res, self.fail, t.empty_tuple_index) ): fix_instance( res, self.fail, self.note, disallow_any=disallow_any, options=self.options, use_generic_error=True, unexpanded_type=t, ) if node.eager: res = get_proper_type(res) return res elif isinstance(node, TypeInfo): return self.analyze_type_with_type_info(node, t.args, t, t.empty_tuple_index) elif node.fullname in TYPE_ALIAS_NAMES: return AnyType(TypeOfAny.special_form) # Concatenate is an operator, no need for a proper type elif node.fullname in CONCATENATE_TYPE_NAMES: # We check the return type further up the stack for valid use locations return self.apply_concatenate_operator(t) else: return self.analyze_unbound_type_without_type_info(t, sym, defining_literal) else: # sym is None return AnyType(TypeOfAny.special_form) def pack_paramspec_args(self, an_args: Sequence[Type]) -> list[Type]: # "Aesthetic" ParamSpec literals for single ParamSpec: C[int, str] -> C[[int, str]]. # These do not support mypy_extensions VarArgs, etc. as they were already analyzed # TODO: should these be re-analyzed to get rid of this inconsistency? count = len(an_args) if count == 0: return [] if count == 1 and isinstance(get_proper_type(an_args[0]), AnyType): # Single Any is interpreted as ..., rather that a single argument with Any type. # I didn't find this in the PEP, but it sounds reasonable. return list(an_args) if any(isinstance(a, (Parameters, ParamSpecType)) for a in an_args): if len(an_args) > 1: first_wrong = next( arg for arg in an_args if isinstance(arg, (Parameters, ParamSpecType)) ) self.fail( "Nested parameter specifications are not allowed", first_wrong, code=codes.VALID_TYPE, ) return [AnyType(TypeOfAny.from_error)] return list(an_args) first = an_args[0] return [ Parameters( an_args, [ARG_POS] * count, [None] * count, line=first.line, column=first.column ) ] def cannot_resolve_type(self, t: UnboundType) -> None: # TODO: Move error message generation to messages.py. We'd first # need access to MessageBuilder here. Also move the similar # message generation logic in semanal.py. self.api.fail(f'Cannot resolve name "{t.name}" (possible cyclic definition)', t) if self.api.is_func_scope(): self.note("Recursive types are not allowed at function scope", t) def apply_concatenate_operator(self, t: UnboundType) -> Type: if len(t.args) == 0: self.api.fail("Concatenate needs type arguments", t, code=codes.VALID_TYPE) return AnyType(TypeOfAny.from_error) # Last argument has to be ParamSpec or Ellipsis. 
ps = self.anal_type(t.args[-1], allow_param_spec=True, allow_ellipsis=True) if not isinstance(ps, (ParamSpecType, Parameters)): if isinstance(ps, UnboundType) and self.allow_unbound_tvars: sym = self.lookup_qualified(ps.name, t) if sym is not None and isinstance(sym.node, ParamSpecExpr): return ps self.api.fail( "The last parameter to Concatenate needs to be a ParamSpec", t, code=codes.VALID_TYPE, ) return AnyType(TypeOfAny.from_error) elif isinstance(ps, ParamSpecType) and ps.prefix.arg_types: self.api.fail("Nested Concatenates are invalid", t, code=codes.VALID_TYPE) args = self.anal_array(t.args[:-1]) pre = ps.prefix if isinstance(ps, ParamSpecType) else ps # mypy can't infer this :( names: list[str | None] = [None] * len(args) pre = Parameters( args + pre.arg_types, [ARG_POS] * len(args) + pre.arg_kinds, names + pre.arg_names, line=t.line, column=t.column, ) return ps.copy_modified(prefix=pre) if isinstance(ps, ParamSpecType) else pre def try_analyze_special_unbound_type(self, t: UnboundType, fullname: str) -> Type | None: """Bind special type that is recognized through magic name such as 'typing.Any'. Return the bound type if successful, and return None if the type is a normal type. """ if fullname == "builtins.None": return NoneType() elif fullname == "typing.Any": return AnyType(TypeOfAny.explicit, line=t.line, column=t.column) elif fullname in FINAL_TYPE_NAMES: if self.prohibit_special_class_field_types: self.fail( f"Final[...] can't be used inside a {self.prohibit_special_class_field_types}", t, code=codes.VALID_TYPE, ) else: if not self.allow_final: self.fail( "Final can be only used as an outermost qualifier in a variable annotation", t, code=codes.VALID_TYPE, ) return AnyType(TypeOfAny.from_error) elif fullname in TUPLE_NAMES: # Tuple is special because it is involved in builtin import cycle # and may be not ready when used. sym = self.api.lookup_fully_qualified_or_none("builtins.tuple") if not sym or isinstance(sym.node, PlaceholderNode): if self.api.is_incomplete_namespace("builtins"): self.api.record_incomplete_ref() else: self.fail('Name "tuple" is not defined', t) return AnyType(TypeOfAny.special_form) if len(t.args) == 0 and not t.empty_tuple_index: # Bare 'Tuple' is same as 'tuple' any_type = self.get_omitted_any(t) return self.named_type("builtins.tuple", [any_type], line=t.line, column=t.column) if len(t.args) == 2 and isinstance(t.args[1], EllipsisType): # Tuple[T, ...] (uniform, variable-length tuple) instance = self.named_type("builtins.tuple", [self.anal_type(t.args[0])]) instance.line = t.line return instance return self.tuple_type( self.anal_array(t.args, allow_unpack=True), line=t.line, column=t.column ) elif fullname == "typing.Union": items = self.anal_array(t.args) return UnionType.make_union(items, line=t.line, column=t.column) elif fullname == "typing.Optional": if len(t.args) != 1: self.fail( "Optional[...] 
must have exactly one type argument", t, code=codes.VALID_TYPE ) return AnyType(TypeOfAny.from_error) item = self.anal_type(t.args[0]) return make_optional_type(item) elif fullname == "typing.Callable": return self.analyze_callable_type(t) elif fullname in TYPE_NAMES: if len(t.args) == 0: if fullname == "typing.Type": any_type = self.get_omitted_any(t) return TypeType(any_type, line=t.line, column=t.column) else: # To prevent assignment of 'builtins.type' inferred as 'builtins.object' # See https://github.com/python/mypy/issues/9476 for more information return None type_str = "Type[...]" if fullname == "typing.Type" else "type[...]" if len(t.args) != 1: self.fail( f"{type_str} must have exactly one type argument", t, code=codes.VALID_TYPE ) item = self.anal_type(t.args[0]) bad_item_name = get_bad_type_type_item(item) if bad_item_name: self.fail(f'{type_str} can\'t contain "{bad_item_name}"', t, code=codes.VALID_TYPE) item = AnyType(TypeOfAny.from_error) return TypeType.make_normalized(item, line=t.line, column=t.column) elif fullname in ("typing_extensions.TypeForm", "typing.TypeForm"): if TYPE_FORM not in self.options.enable_incomplete_feature: self.fail( "TypeForm is experimental," " must be enabled with --enable-incomplete-feature=TypeForm", t, ) if len(t.args) == 0: any_type = self.get_omitted_any(t) return TypeType(any_type, line=t.line, column=t.column, is_type_form=True) if len(t.args) != 1: type_str = "TypeForm[...]" self.fail( type_str + " must have exactly one type argument", t, code=codes.VALID_TYPE ) item = self.anal_type(t.args[0]) return TypeType.make_normalized(item, line=t.line, column=t.column, is_type_form=True) elif fullname == "typing.ClassVar": if self.nesting_level > 0: self.fail( "Invalid type: ClassVar nested inside other type", t, code=codes.VALID_TYPE ) if self.prohibit_special_class_field_types: self.fail( f"ClassVar[...] can't be used inside a {self.prohibit_special_class_field_types}", t, code=codes.VALID_TYPE, ) if self.defining_alias: self.fail( "ClassVar[...] can't be used inside a type alias", t, code=codes.VALID_TYPE ) if len(t.args) == 0: return AnyType(TypeOfAny.from_omitted_generics, line=t.line, column=t.column) if len(t.args) != 1: self.fail( "ClassVar[...] must have at most one type argument", t, code=codes.VALID_TYPE ) return AnyType(TypeOfAny.from_error) return self.anal_type(t.args[0], allow_final=self.options.python_version >= (3, 13)) elif fullname in NEVER_NAMES: return UninhabitedType() elif fullname in LITERAL_TYPE_NAMES: return self.analyze_literal_type(t) elif fullname in ANNOTATED_TYPE_NAMES: if len(t.args) < 2: self.fail( "Annotated[...] 
must have exactly one type argument" " and at least one annotation", t, code=codes.VALID_TYPE, ) return AnyType(TypeOfAny.from_error) return self.anal_type( t.args[0], allow_typed_dict_special_forms=self.allow_typed_dict_special_forms ) elif fullname in ("typing_extensions.Required", "typing.Required"): if not self.allow_typed_dict_special_forms: self.fail( "Required[] can be only used in a TypedDict definition", t, code=codes.VALID_TYPE, ) return AnyType(TypeOfAny.from_error) if len(t.args) != 1: self.fail( "Required[] must have exactly one type argument", t, code=codes.VALID_TYPE ) return AnyType(TypeOfAny.from_error) return RequiredType( self.anal_type(t.args[0], allow_typed_dict_special_forms=True), required=True ) elif fullname in ("typing_extensions.NotRequired", "typing.NotRequired"): if not self.allow_typed_dict_special_forms: self.fail( "NotRequired[] can be only used in a TypedDict definition", t, code=codes.VALID_TYPE, ) return AnyType(TypeOfAny.from_error) if len(t.args) != 1: self.fail( "NotRequired[] must have exactly one type argument", t, code=codes.VALID_TYPE ) return AnyType(TypeOfAny.from_error) return RequiredType( self.anal_type(t.args[0], allow_typed_dict_special_forms=True), required=False ) elif fullname in ("typing_extensions.ReadOnly", "typing.ReadOnly"): if not self.allow_typed_dict_special_forms: self.fail( "ReadOnly[] can be only used in a TypedDict definition", t, code=codes.VALID_TYPE, ) return AnyType(TypeOfAny.from_error) if len(t.args) != 1: self.fail( '"ReadOnly[]" must have exactly one type argument', t, code=codes.VALID_TYPE ) return AnyType(TypeOfAny.from_error) return ReadOnlyType(self.anal_type(t.args[0], allow_typed_dict_special_forms=True)) elif ( self.anal_type_guard_arg(t, fullname) is not None or self.anal_type_is_arg(t, fullname) is not None ): # In most contexts, TypeGuard[...] acts as an alias for bool (ignoring its args) return self.named_type("builtins.bool") elif fullname in UNPACK_TYPE_NAMES: if len(t.args) != 1: self.fail("Unpack[...] requires exactly one type argument", t) return AnyType(TypeOfAny.from_error) if not self.allow_unpack: self.fail(message_registry.INVALID_UNPACK_POSITION, t, code=codes.VALID_TYPE) return AnyType(TypeOfAny.from_error) self.allow_type_var_tuple = True result = UnpackType(self.anal_type(t.args[0]), line=t.line, column=t.column) self.allow_type_var_tuple = False return result elif fullname in SELF_TYPE_NAMES: if t.args: self.fail("Self type cannot have type arguments", t) if self.prohibit_self_type is not None: self.fail(f"Self type cannot be used in {self.prohibit_self_type}", t) return AnyType(TypeOfAny.from_error) if self.api.type is None: self.fail("Self type is only allowed in annotations within class definition", t) return AnyType(TypeOfAny.from_error) if self.api.type.has_base("builtins.type"): self.fail("Self type cannot be used in a metaclass", t) if self.api.type.self_type is not None: if self.api.type.is_final or self.api.type.is_enum and self.api.type.enum_members: return fill_typevars(self.api.type) return self.api.type.self_type.copy_modified(line=t.line, column=t.column) # TODO: verify this is unreachable and replace with an assert? 
self.fail("Unexpected Self type", t) return AnyType(TypeOfAny.from_error) return None def get_omitted_any(self, typ: Type, fullname: str | None = None) -> AnyType: disallow_any = not self.is_typeshed_stub and self.options.disallow_any_generics return get_omitted_any(disallow_any, self.fail, self.note, typ, self.options, fullname) def check_and_warn_deprecated(self, info: TypeInfo, ctx: Context) -> None: """Similar logic to `TypeChecker.check_deprecated` and `TypeChecker.warn_deprecated.""" if ( (deprecated := info.deprecated) and not self.is_typeshed_stub and not (self.api.type and (self.api.type.fullname == info.fullname)) and not any( info.fullname == p or info.fullname.startswith(f"{p}.") for p in self.options.deprecated_calls_exclude ) ): for imp in self.cur_mod_node.imports: if isinstance(imp, ImportFrom) and any(info.name == n[0] for n in imp.names): break else: warn = self.note if self.options.report_deprecated_as_note else self.fail warn(deprecated, ctx, code=codes.DEPRECATED) def analyze_type_with_type_info( self, info: TypeInfo, args: Sequence[Type], ctx: Context, empty_tuple_index: bool ) -> Type: """Bind unbound type when were able to find target TypeInfo. This handles simple cases like 'int', 'modname.UserClass[str]', etc. """ self.check_and_warn_deprecated(info, ctx) if len(args) > 0 and info.fullname == "builtins.tuple": fallback = Instance(info, [AnyType(TypeOfAny.special_form)], ctx.line) return TupleType(self.anal_array(args, allow_unpack=True), fallback, ctx.line) # Analyze arguments and (usually) construct Instance type. The # number of type arguments and their values are # checked only later, since we do not always know the # valid count at this point. Thus we may construct an # Instance with an invalid number of type arguments. # # We allow ParamSpec literals based on a heuristic: it will be # checked later anyways but the error message may be worse. instance = Instance( info, self.anal_array( args, allow_param_spec=True, allow_param_spec_literals=info.has_param_spec_type, allow_unpack=True, # Fixed length tuples can be used for non-variadic types. ), ctx.line, ctx.column, ) instance.end_line = ctx.end_line instance.end_column = ctx.end_column if len(info.type_vars) == 1 and info.has_param_spec_type: instance.args = tuple(self.pack_paramspec_args(instance.args)) # Check type argument count. instance.args = tuple(flatten_nested_tuples(instance.args)) if not (self.defining_alias and self.nesting_level == 0) and not validate_instance( instance, self.fail, empty_tuple_index ): fix_instance( instance, self.fail, self.note, disallow_any=self.options.disallow_any_generics and not self.is_typeshed_stub, options=self.options, ) tup = info.tuple_type if tup is not None: # The class has a Tuple[...] base class so it will be # represented as a tuple type. if info.special_alias: return instantiate_type_alias( info.special_alias, # TODO: should we allow NamedTuples generic in ParamSpec? self.anal_array(args, allow_unpack=True), self.fail, False, ctx, self.options, use_standard_error=True, ) return tup.copy_modified( items=self.anal_array(tup.items, allow_unpack=True), fallback=instance ) td = info.typeddict_type if td is not None: # The class has a TypedDict[...] base class so it will be # represented as a typeddict type. if info.special_alias: return instantiate_type_alias( info.special_alias, # TODO: should we allow TypedDicts generic in ParamSpec? 
self.anal_array(args, allow_unpack=True), self.fail, False, ctx, self.options, use_standard_error=True, ) # Create a named TypedDictType return td.copy_modified( item_types=self.anal_array(list(td.items.values())), fallback=instance ) if info.fullname == "types.NoneType": self.fail( "NoneType should not be used as a type, please use None instead", ctx, code=codes.NONETYPE_TYPE, ) return NoneType(ctx.line, ctx.column) return instance def analyze_unbound_type_without_type_info( self, t: UnboundType, sym: SymbolTableNode, defining_literal: bool ) -> Type: """Figure out what an unbound type that doesn't refer to a TypeInfo node means. This is something unusual. We try our best to find out what it is. """ name = sym.fullname if name is None: assert sym.node is not None name = sym.node.name # Option 1: # Something with an Any type -- make it an alias for Any in a type # context. This is slightly problematic as it allows using the type 'Any' # as a base class -- however, this will fail soon at runtime so the problem # is pretty minor. if isinstance(sym.node, Var): typ = get_proper_type(sym.node.type) if isinstance(typ, AnyType): return AnyType( TypeOfAny.from_unimported_type, missing_import_name=typ.missing_import_name ) elif self.allow_type_any: if isinstance(typ, Instance) and typ.type.fullname == "builtins.type": return AnyType(TypeOfAny.special_form) if isinstance(typ, TypeType) and isinstance(typ.item, AnyType): return AnyType(TypeOfAny.from_another_any, source_any=typ.item) # Option 2: # Unbound type variable. Currently these may be still valid, # for example when defining a generic type alias. unbound_tvar = ( isinstance(sym.node, (TypeVarExpr, TypeVarTupleExpr)) and self.tvar_scope.get_binding(sym) is None ) if self.allow_unbound_tvars and unbound_tvar: return t # Option 3: # Enum value. Note: we only want to return a LiteralType when # we're using this enum value specifically within context of # a "Literal[...]" type. So, if `defining_literal` is not set, # we bail out early with an error. # # If, in the distant future, we decide to permit things like # `def foo(x: Color.RED) -> None: ...`, we can remove that # check entirely. if ( isinstance(sym.node, Var) and sym.node.info and sym.node.info.is_enum and not sym.node.name.startswith("__") ): value = sym.node.name base_enum_short_name = sym.node.info.name if not defining_literal: msg = message_registry.INVALID_TYPE_RAW_ENUM_VALUE.format( base_enum_short_name, value ) self.fail(msg.value, t, code=msg.code) return AnyType(TypeOfAny.from_error) return LiteralType( value=value, fallback=Instance(sym.node.info, [], line=t.line, column=t.column), line=t.line, column=t.column, ) # None of the above options worked. We parse the args (if there are any) # to make sure there are no remaining semanal-only types, then give up. t = t.copy_modified(args=self.anal_array(t.args)) # TODO: Move this message building logic to messages.py. 
notes: list[str] = [] error_code = codes.VALID_TYPE if isinstance(sym.node, Var): notes.append( "See https://mypy.readthedocs.io/en/" "stable/common_issues.html#variables-vs-type-aliases" ) message = 'Variable "{}" is not valid as a type' elif isinstance(sym.node, (SYMBOL_FUNCBASE_TYPES, Decorator)): message = 'Function "{}" is not valid as a type' if name == "builtins.any": notes.append('Perhaps you meant "typing.Any" instead of "any"?') elif name == "builtins.callable": notes.append('Perhaps you meant "typing.Callable" instead of "callable"?') else: notes.append('Perhaps you need "Callable[...]" or a callback protocol?') elif isinstance(sym.node, MypyFile): message = 'Module "{}" is not valid as a type' notes.append("Perhaps you meant to use a protocol matching the module structure?") elif unbound_tvar: assert isinstance(sym.node, TypeVarLikeExpr) if sym.node.is_new_style: # PEP 695 type parameters are never considered unbound -- they are undefined # in contexts where they aren't valid, such as in argument default values. message = 'Name "{}" is not defined' name = name.split(".")[-1] error_code = codes.NAME_DEFINED else: message = 'Type variable "{}" is unbound' short = name.split(".")[-1] notes.append( f'(Hint: Use "Generic[{short}]" or "Protocol[{short}]" base class' f' to bind "{short}" inside a class)' ) notes.append( f'(Hint: Use "{short}" in function signature ' f'to bind "{short}" inside a function)' ) else: message = 'Cannot interpret reference "{}" as a type' if not defining_literal: # Literal check already gives a custom error. Avoid duplicating errors. self.fail(message.format(name), t, code=error_code) for note in notes: self.note(note, t, code=error_code) # TODO: Would it be better to always return Any instead of UnboundType # in case of an error? On one hand, UnboundType has a name so error messages # are more detailed, on the other hand, some of them may be bogus, # see https://github.com/python/mypy/issues/4987. return t def visit_any(self, t: AnyType) -> Type: return t def visit_none_type(self, t: NoneType) -> Type: return t def visit_uninhabited_type(self, t: UninhabitedType) -> Type: return t def visit_erased_type(self, t: ErasedType) -> Type: # This type should exist only temporarily during type inference assert False, "Internal error: Unexpected erased type" def visit_deleted_type(self, t: DeletedType) -> Type: return t def visit_type_list(self, t: TypeList) -> Type: # Parameters literal (Z[[int, str, Whatever]]) if self.allow_param_spec_literals: params = self.analyze_callable_args(t) if params: ts, kinds, names = params # bind these types return Parameters(self.anal_array(ts), kinds, names, line=t.line, column=t.column) else: return AnyType(TypeOfAny.from_error) else: self.fail( 'Bracketed expression "[...]" is not valid as a type', t, code=codes.VALID_TYPE ) if len(t.items) == 1: self.note('Did you mean "List[...]"?', t) return AnyType(TypeOfAny.from_error) def visit_callable_argument(self, t: CallableArgument) -> Type: self.fail("Invalid type", t, code=codes.VALID_TYPE) return AnyType(TypeOfAny.from_error) def visit_instance(self, t: Instance) -> Type: return t def visit_type_alias_type(self, t: TypeAliasType) -> Type: # TODO: should we do something here? 
return t def visit_type_var(self, t: TypeVarType) -> Type: return t def visit_param_spec(self, t: ParamSpecType) -> Type: return t def visit_type_var_tuple(self, t: TypeVarTupleType) -> Type: return t def visit_unpack_type(self, t: UnpackType) -> Type: if not self.allow_unpack: self.fail(message_registry.INVALID_UNPACK_POSITION, t.type, code=codes.VALID_TYPE) return AnyType(TypeOfAny.from_error) self.allow_type_var_tuple = True result = UnpackType(self.anal_type(t.type), from_star_syntax=t.from_star_syntax) self.allow_type_var_tuple = False return result def visit_parameters(self, t: Parameters) -> Type: raise NotImplementedError("ParamSpec literals cannot have unbound TypeVars") def visit_callable_type( self, t: CallableType, nested: bool = True, namespace: str = "" ) -> Type: # Every Callable can bind its own type variables, if they're not in the outer scope # TODO: attach namespace for nested free type variables (these appear in return type only). with self.tvar_scope_frame(namespace=namespace): unpacked_kwargs = t.unpack_kwargs if self.defining_alias: variables = t.variables else: variables, _ = self.bind_function_type_variables(t, t) type_guard = self.anal_type_guard(t.ret_type) if t.type_guard is None else t.type_guard type_is = self.anal_type_is(t.ret_type) if t.type_is is None else t.type_is arg_kinds = t.arg_kinds arg_types = [] param_spec_with_args = param_spec_with_kwargs = None param_spec_invalid = False for kind, ut in zip(arg_kinds, t.arg_types): if kind == ARG_STAR: param_spec_with_args, at = self.anal_star_arg_type(ut, kind, nested=nested) elif kind == ARG_STAR2: param_spec_with_kwargs, at = self.anal_star_arg_type(ut, kind, nested=nested) else: if param_spec_with_args: param_spec_invalid = True self.fail( "Arguments not allowed after ParamSpec.args", t, code=codes.VALID_TYPE ) at = self.anal_type(ut, nested=nested, allow_unpack=False) arg_types.append(at) if nested and arg_types: # If we've got a Callable[[Unpack[SomeTypedDict]], None], make sure # Unpack is interpreted as `**` and not as `*`. last = arg_types[-1] if isinstance(last, UnpackType): # TODO: it would be better to avoid this get_proper_type() call. p_at = get_proper_type(last.type) if isinstance(p_at, TypedDictType) and not last.from_star_syntax: # Automatically detect Unpack[Foo] in Callable as backwards # compatible syntax for **Foo, if Foo is a TypedDict. arg_kinds[-1] = ARG_STAR2 arg_types[-1] = p_at unpacked_kwargs = True arg_types = self.check_unpacks_in_list(arg_types) if not param_spec_invalid and param_spec_with_args != param_spec_with_kwargs: # If already invalid, do not report more errors - definition has # to be fixed anyway name = param_spec_with_args or param_spec_with_kwargs self.fail( f'ParamSpec must have "*args" typed as "{name}.args" and "**kwargs" typed as "{name}.kwargs"', t, code=codes.VALID_TYPE, ) param_spec_invalid = True if param_spec_invalid: if ARG_STAR in arg_kinds: arg_types[arg_kinds.index(ARG_STAR)] = AnyType(TypeOfAny.from_error) if ARG_STAR2 in arg_kinds: arg_types[arg_kinds.index(ARG_STAR2)] = AnyType(TypeOfAny.from_error) # If there were multiple (invalid) unpacks, the arg types list will become shorter, # we need to trim the kinds/names as well to avoid crashes. 
arg_kinds = t.arg_kinds[: len(arg_types)] arg_names = t.arg_names[: len(arg_types)] ret = t.copy_modified( arg_types=arg_types, arg_kinds=arg_kinds, arg_names=arg_names, ret_type=self.anal_type(t.ret_type, nested=nested), # If the fallback isn't filled in yet, # its type will be the falsey FakeInfo fallback=(t.fallback if t.fallback.type else self.named_type("builtins.function")), variables=self.anal_var_defs(variables), type_guard=type_guard, type_is=type_is, unpack_kwargs=unpacked_kwargs, ) return ret def anal_type_guard(self, t: Type) -> Type | None: if isinstance(t, UnboundType): sym = self.lookup_qualified(t.name, t) if sym is not None and sym.node is not None: return self.anal_type_guard_arg(t, sym.node.fullname) # TODO: What if it's an Instance? Then use t.type.fullname? return None def anal_type_guard_arg(self, t: UnboundType, fullname: str) -> Type | None: if fullname in ("typing_extensions.TypeGuard", "typing.TypeGuard"): if len(t.args) != 1: self.fail( "TypeGuard must have exactly one type argument", t, code=codes.VALID_TYPE ) return AnyType(TypeOfAny.from_error) return self.anal_type(t.args[0]) return None def anal_type_is(self, t: Type) -> Type | None: if isinstance(t, UnboundType): sym = self.lookup_qualified(t.name, t) if sym is not None and sym.node is not None: return self.anal_type_is_arg(t, sym.node.fullname) # TODO: What if it's an Instance? Then use t.type.fullname? return None def anal_type_is_arg(self, t: UnboundType, fullname: str) -> Type | None: if fullname in ("typing_extensions.TypeIs", "typing.TypeIs"): if len(t.args) != 1: self.fail("TypeIs must have exactly one type argument", t, code=codes.VALID_TYPE) return AnyType(TypeOfAny.from_error) return self.anal_type(t.args[0]) return None def anal_star_arg_type(self, t: Type, kind: ArgKind, nested: bool) -> tuple[str | None, Type]: """Analyze signature argument type for *args and **kwargs argument.""" if isinstance(t, UnboundType) and t.name and "." in t.name and not t.args: components = t.name.split(".") tvar_name = ".".join(components[:-1]) sym = self.lookup_qualified(tvar_name, t) if sym is not None and isinstance(sym.node, ParamSpecExpr): tvar_def = self.tvar_scope.get_binding(sym) if isinstance(tvar_def, ParamSpecType): if kind == ARG_STAR: make_paramspec = paramspec_args if components[-1] != "args": self.fail( f'Use "{tvar_name}.args" for variadic "*" parameter', t, code=codes.VALID_TYPE, ) elif kind == ARG_STAR2: make_paramspec = paramspec_kwargs if components[-1] != "kwargs": self.fail( f'Use "{tvar_name}.kwargs" for variadic "**" parameter', t, code=codes.VALID_TYPE, ) else: assert False, kind return tvar_name, make_paramspec( tvar_def.name, tvar_def.fullname, tvar_def.id, named_type_func=self.named_type, line=t.line, column=t.column, ) return None, self.anal_type(t, nested=nested, allow_unpack=True) def visit_overloaded(self, t: Overloaded) -> Type: # Overloaded types are manually constructed in semanal.py by analyzing the # AST and combining together the Callable types this visitor converts. # # So if we're ever asked to reanalyze an Overloaded type, we know it's # fine to just return it as-is. return t def visit_tuple_type(self, t: TupleType) -> Type: # Types such as (t1, t2, ...) only allowed in assignment statements. They'll # generate errors elsewhere, and Tuple[t1, t2, ...] must be used instead. 
if t.implicit and not self.allow_tuple_literal: self.fail("Syntax error in type annotation", t, code=codes.SYNTAX) if len(t.items) == 0: self.note( "Suggestion: Use Tuple[()] instead of () for an empty tuple, or " "None for a function without a return value", t, code=codes.SYNTAX, ) elif len(t.items) == 1: self.note("Suggestion: Is there a spurious trailing comma?", t, code=codes.SYNTAX) else: self.note( "Suggestion: Use Tuple[T1, ..., Tn] instead of (T1, ..., Tn)", t, code=codes.SYNTAX, ) return AnyType(TypeOfAny.from_error) any_type = AnyType(TypeOfAny.special_form) # If the fallback isn't filled in yet, its type will be the falsey FakeInfo fallback = ( t.partial_fallback if t.partial_fallback.type else self.named_type("builtins.tuple", [any_type]) ) return TupleType(self.anal_array(t.items, allow_unpack=True), fallback, t.line) def visit_typeddict_type(self, t: TypedDictType) -> Type: req_keys = set() readonly_keys = set() items = {} for item_name, item_type in t.items.items(): # TODO: rework analyzed = self.anal_type(item_type, allow_typed_dict_special_forms=True) if isinstance(analyzed, RequiredType): if analyzed.required: req_keys.add(item_name) analyzed = analyzed.item else: # Keys are required by default. req_keys.add(item_name) if isinstance(analyzed, ReadOnlyType): readonly_keys.add(item_name) analyzed = analyzed.item items[item_name] = analyzed if t.fallback.type is MISSING_FALLBACK: # anonymous/inline TypedDict if INLINE_TYPEDDICT not in self.options.enable_incomplete_feature: self.fail( "Inline TypedDict is experimental," " must be enabled with --enable-incomplete-feature=InlineTypedDict", t, ) required_keys = req_keys fallback = self.named_type("typing._TypedDict") for typ in t.extra_items_from: analyzed = self.analyze_type(typ) p_analyzed = get_proper_type(analyzed) if not isinstance(p_analyzed, TypedDictType): if not isinstance(p_analyzed, (AnyType, PlaceholderType)): self.fail("Can only merge-in other TypedDict", t, code=codes.VALID_TYPE) continue for sub_item_name, sub_item_type in p_analyzed.items.items(): if sub_item_name in items: self.fail(TYPEDDICT_OVERRIDE_MERGE.format(sub_item_name), t) continue items[sub_item_name] = sub_item_type if sub_item_name in p_analyzed.required_keys: req_keys.add(sub_item_name) if sub_item_name in p_analyzed.readonly_keys: readonly_keys.add(sub_item_name) else: required_keys = t.required_keys fallback = t.fallback return TypedDictType(items, required_keys, readonly_keys, fallback, t.line, t.column) def visit_raw_expression_type(self, t: RawExpressionType) -> Type: # We should never see a bare Literal. We synthesize these raw literals # in the earlier stages of semantic analysis, but those # "fake literals" should always be wrapped in an UnboundType # corresponding to 'Literal'. # # Note: if at some point in the distant future, we decide to # make signatures like "foo(x: 20) -> None" legal, we can change # this method so it generates and returns an actual LiteralType # instead. if self.report_invalid_types: if t.base_type_name in ("builtins.int", "builtins.bool"): # The only time it makes sense to use an int or bool is inside of # a literal type. msg = f"Invalid type: try using Literal[{repr(t.literal_value)}] instead?" elif t.base_type_name in ("builtins.float", "builtins.complex"): # We special-case warnings for floats and complex numbers. msg = f"Invalid type: {t.simple_name()} literals cannot be used as a type" else: # And in all other cases, we default to a generic error message. 
# Note: the reason why we use a generic error message for strings # but not ints or bools is because whenever we see an out-of-place # string, it's unclear if the user meant to construct a literal type # or just misspelled a regular type. So we avoid guessing. msg = "Invalid type comment or annotation" self.fail(msg, t, code=codes.VALID_TYPE) if t.note is not None: self.note(t.note, t, code=codes.VALID_TYPE) return AnyType(TypeOfAny.from_error, line=t.line, column=t.column) def visit_literal_type(self, t: LiteralType) -> Type: return t def visit_union_type(self, t: UnionType) -> Type: if ( t.uses_pep604_syntax is True and t.is_evaluated is True and not self.always_allow_new_syntax and not self.options.python_version >= (3, 10) ): self.fail("X | Y syntax for unions requires Python 3.10", t, code=codes.SYNTAX) return UnionType(self.anal_array(t.items), t.line, uses_pep604_syntax=t.uses_pep604_syntax) def visit_partial_type(self, t: PartialType) -> Type: assert False, "Internal error: Unexpected partial type" def visit_ellipsis_type(self, t: EllipsisType) -> Type: if self.allow_ellipsis or self.allow_param_spec_literals: any_type = AnyType(TypeOfAny.explicit) return Parameters( [any_type, any_type], [ARG_STAR, ARG_STAR2], [None, None], is_ellipsis_args=True ) else: self.fail('Unexpected "..."', t) return AnyType(TypeOfAny.from_error) def visit_type_type(self, t: TypeType) -> Type: return TypeType.make_normalized( self.anal_type(t.item), line=t.line, is_type_form=t.is_type_form ) def visit_placeholder_type(self, t: PlaceholderType) -> Type: n = ( None # No dot in fullname indicates we are at function scope, and recursive # types are not supported there anyway, so we just give up. if not t.fullname or "." not in t.fullname else self.api.lookup_fully_qualified(t.fullname) ) if not n or isinstance(n.node, PlaceholderNode): self.api.defer() # Still incomplete return t else: # TODO: Handle non-TypeInfo assert isinstance(n.node, TypeInfo) return self.analyze_type_with_type_info(n.node, t.args, t, False) def analyze_callable_args_for_paramspec( self, callable_args: Type, ret_type: Type, fallback: Instance ) -> CallableType | None: """Construct a 'Callable[P, RET]', where P is ParamSpec, return None if we cannot.""" if not isinstance(callable_args, UnboundType): return None sym = self.lookup_qualified(callable_args.name, callable_args) if sym is None: return None tvar_def = self.tvar_scope.get_binding(sym) if not isinstance(tvar_def, ParamSpecType): if ( tvar_def is None and self.allow_unbound_tvars and isinstance(sym.node, ParamSpecExpr) ): # We are analyzing this type in runtime context (e.g. as type application). # If it is not valid as a type in this position an error will be given later. 
return callable_with_ellipsis( AnyType(TypeOfAny.explicit), ret_type=ret_type, fallback=fallback ) return None elif ( self.defining_alias and self.not_declared_in_type_params(tvar_def.name) and tvar_def not in self.allowed_alias_tvars ): if self.python_3_12_type_alias: msg = message_registry.TYPE_PARAMETERS_SHOULD_BE_DECLARED.format( f'"{tvar_def.name}"' ) else: msg = f'ParamSpec "{tvar_def.name}" is not included in type_params' self.fail(msg, callable_args, code=codes.VALID_TYPE) return callable_with_ellipsis( AnyType(TypeOfAny.special_form), ret_type=ret_type, fallback=fallback ) return CallableType( [ paramspec_args( tvar_def.name, tvar_def.fullname, tvar_def.id, named_type_func=self.named_type ), paramspec_kwargs( tvar_def.name, tvar_def.fullname, tvar_def.id, named_type_func=self.named_type ), ], [nodes.ARG_STAR, nodes.ARG_STAR2], [None, None], ret_type=ret_type, fallback=fallback, ) def analyze_callable_args_for_concatenate( self, callable_args: Type, ret_type: Type, fallback: Instance ) -> CallableType | AnyType | None: """Construct a 'Callable[C, RET]', where C is Concatenate[..., P], returning None if we cannot. """ if not isinstance(callable_args, UnboundType): return None sym = self.lookup_qualified(callable_args.name, callable_args) if sym is None: return None if sym.node is None: return None if sym.node.fullname not in CONCATENATE_TYPE_NAMES: return None tvar_def = self.anal_type(callable_args, allow_param_spec=True) if not isinstance(tvar_def, (ParamSpecType, Parameters)): if self.allow_unbound_tvars and isinstance(tvar_def, UnboundType): sym = self.lookup_qualified(tvar_def.name, callable_args) if sym is not None and isinstance(sym.node, ParamSpecExpr): # We are analyzing this type in runtime context (e.g. as type application). # If it is not valid as a type in this position an error will be given later. return callable_with_ellipsis( AnyType(TypeOfAny.explicit), ret_type=ret_type, fallback=fallback ) # Error was already given, so prevent further errors. return AnyType(TypeOfAny.from_error) if isinstance(tvar_def, Parameters): # This comes from Concatenate[int, ...] return CallableType( arg_types=tvar_def.arg_types, arg_names=tvar_def.arg_names, arg_kinds=tvar_def.arg_kinds, ret_type=ret_type, fallback=fallback, from_concatenate=True, ) # ick, CallableType should take ParamSpecType prefix = tvar_def.prefix # we don't set the prefix here as generic arguments will get updated at some point # in the future. CallableType.param_spec() accounts for this. return CallableType( [ *prefix.arg_types, paramspec_args( tvar_def.name, tvar_def.fullname, tvar_def.id, named_type_func=self.named_type ), paramspec_kwargs( tvar_def.name, tvar_def.fullname, tvar_def.id, named_type_func=self.named_type ), ], [*prefix.arg_kinds, nodes.ARG_STAR, nodes.ARG_STAR2], [*prefix.arg_names, None, None], ret_type=ret_type, fallback=fallback, from_concatenate=True, ) def analyze_callable_type(self, t: UnboundType) -> Type: fallback = self.named_type("builtins.function") if len(t.args) == 0: # Callable (bare). Treat as Callable[..., Any]. 
any_type = self.get_omitted_any(t) ret = callable_with_ellipsis(any_type, any_type, fallback) elif len(t.args) == 2: callable_args = t.args[0] ret_type = t.args[1] if isinstance(callable_args, TypeList): # Callable[[ARG, ...], RET] (ordinary callable type) analyzed_args = self.analyze_callable_args(callable_args) if analyzed_args is None: return AnyType(TypeOfAny.from_error) args, kinds, names = analyzed_args ret = CallableType(args, kinds, names, ret_type=ret_type, fallback=fallback) elif isinstance(callable_args, EllipsisType): # Callable[..., RET] (with literal ellipsis; accept arbitrary arguments) ret = callable_with_ellipsis( AnyType(TypeOfAny.explicit), ret_type=ret_type, fallback=fallback ) else: # Callable[P, RET] (where P is ParamSpec) with self.tvar_scope_frame(namespace=""): # Temporarily bind ParamSpecs to allow code like this: # my_fun: Callable[Q, Foo[Q]] # We usually do this later in visit_callable_type(), but the analysis # below happens at very early stage. variables = [] for name, tvar_expr in self.find_type_var_likes(callable_args): variables.append( self.tvar_scope.bind_new(name, tvar_expr, self.fail_func, t) ) maybe_ret = self.analyze_callable_args_for_paramspec( callable_args, ret_type, fallback ) or self.analyze_callable_args_for_concatenate( callable_args, ret_type, fallback ) if isinstance(maybe_ret, CallableType): maybe_ret = maybe_ret.copy_modified(variables=variables) if maybe_ret is None: # Callable[?, RET] (where ? is something invalid) self.fail( "The first argument to Callable must be a " 'list of types, parameter specification, or "..."', t, code=codes.VALID_TYPE, ) self.note( "See https://mypy.readthedocs.io/en/stable/kinds_of_types.html#callable-types-and-lambdas", t, ) return AnyType(TypeOfAny.from_error) elif isinstance(maybe_ret, AnyType): return maybe_ret ret = maybe_ret else: if self.options.disallow_any_generics: self.fail('Please use "Callable[[<parameters>], <return type>]"', t) else: self.fail('Please use "Callable[[<parameters>], <return type>]" or "Callable"', t) return AnyType(TypeOfAny.from_error) assert isinstance(ret, CallableType) return ret.accept(self) def refers_to_full_names(self, arg: UnboundType, names: Sequence[str]) -> bool: sym = self.lookup_qualified(arg.name, arg) if sym is not None: if sym.fullname in names: return True return False def analyze_callable_args( self, arglist: TypeList ) -> tuple[list[Type], list[ArgKind], list[str | None]] | None: args: list[Type] = [] kinds: list[ArgKind] = [] names: list[str | None] = [] seen_unpack = False unpack_types: list[Type] = [] invalid_unpacks: list[Type] = [] second_unpack_last = False for i, arg in enumerate(arglist.items): if isinstance(arg, CallableArgument): args.append(arg.typ) names.append(arg.name) if arg.constructor is None: return None found = self.lookup_qualified(arg.constructor, arg) if found is None: # Looking it up already put an error message in return None elif found.fullname not in ARG_KINDS_BY_CONSTRUCTOR: self.fail(f'Invalid argument constructor "{found.fullname}"', arg) return None else: assert found.fullname is not None kind = ARG_KINDS_BY_CONSTRUCTOR[found.fullname] kinds.append(kind) if arg.name is not None and kind.is_star(): self.fail(f"{arg.constructor} arguments should not have names", arg) return None elif ( isinstance(arg, UnboundType) and self.refers_to_full_names(arg, UNPACK_TYPE_NAMES) or isinstance(arg, UnpackType) ): if seen_unpack: # Multiple unpacks, preserve them, so we can give an error later. 
if i == len(arglist.items) - 1 and not invalid_unpacks: # Special case: if there are just two unpacks, and the second one appears # as last type argument, it can be still valid, if the second unpacked type # is a TypedDict. This should be checked by the caller. second_unpack_last = True invalid_unpacks.append(arg) continue seen_unpack = True unpack_types.append(arg) else: if seen_unpack: unpack_types.append(arg) else: args.append(arg) kinds.append(ARG_POS) names.append(None) if seen_unpack: if len(unpack_types) == 1: args.append(unpack_types[0]) else: first = unpack_types[0] if isinstance(first, UnpackType): # UnpackType doesn't have its own line/column numbers, # so use the unpacked type for error messages. first = first.type args.append( UnpackType(self.tuple_type(unpack_types, line=first.line, column=first.column)) ) kinds.append(ARG_STAR) names.append(None) for arg in invalid_unpacks: args.append(arg) kinds.append(ARG_STAR2 if second_unpack_last else ARG_STAR) names.append(None) # Note that arglist below is only used for error context. check_arg_names(names, [arglist] * len(args), self.fail, "Callable") check_arg_kinds(kinds, [arglist] * len(args), self.fail) return args, kinds, names def analyze_literal_type(self, t: UnboundType) -> Type: if len(t.args) == 0: self.fail("Literal[...] must have at least one parameter", t, code=codes.VALID_TYPE) return AnyType(TypeOfAny.from_error) output: list[Type] = [] for i, arg in enumerate(t.args): analyzed_types = self.analyze_literal_param(i + 1, arg, t) if analyzed_types is None: return AnyType(TypeOfAny.from_error) else: output.extend(analyzed_types) return UnionType.make_union(output, line=t.line) def analyze_literal_param(self, idx: int, arg: Type, ctx: Context) -> list[Type] | None: # This UnboundType was originally defined as a string. if ( isinstance(arg, ProperType) and isinstance(arg, (UnboundType, UnionType)) and arg.original_str_expr is not None ): assert arg.original_str_fallback is not None return [ LiteralType( value=arg.original_str_expr, fallback=self.named_type(arg.original_str_fallback), line=arg.line, column=arg.column, ) ] # If arg is an UnboundType that was *not* originally defined as # a string, try expanding it in case it's a type alias or something. if isinstance(arg, UnboundType): self.nesting_level += 1 try: arg = self.visit_unbound_type(arg, defining_literal=True) finally: self.nesting_level -= 1 # Literal[...] cannot contain Any. Give up and add an error message # (if we haven't already). arg = get_proper_type(arg) if isinstance(arg, AnyType): # Note: We can encounter Literals containing 'Any' under three circumstances: # # 1. If the user attempts use an explicit Any as a parameter # 2. If the user is trying to use an enum value imported from a module with # no type hints, giving it an implicit type of 'Any' # 3. If there's some other underlying problem with the parameter. # # We report an error in only the first two cases. In the third case, we assume # some other region of the code has already reported a more relevant error. # # TODO: Once we start adding support for enums, make sure we report a custom # error for case 2 as well. if arg.type_of_any not in (TypeOfAny.from_error, TypeOfAny.special_form): self.fail( f'Parameter {idx} of Literal[...] cannot be of type "Any"', ctx, code=codes.VALID_TYPE, ) return None elif isinstance(arg, RawExpressionType): # A raw literal. Convert it directly into a literal if we can. 
if arg.literal_value is None: name = arg.simple_name() if name in ("float", "complex"): msg = f'Parameter {idx} of Literal[...] cannot be of type "{name}"' else: msg = "Invalid type: Literal[...] cannot contain arbitrary expressions" self.fail(msg, ctx, code=codes.VALID_TYPE) # Note: we deliberately ignore arg.note here: the extra info might normally be # helpful, but it generally won't make sense in the context of a Literal[...]. return None # Remap bytes and unicode into the appropriate type for the correct Python version fallback = self.named_type(arg.base_type_name) assert isinstance(fallback, Instance) return [LiteralType(arg.literal_value, fallback, line=arg.line, column=arg.column)] elif isinstance(arg, (NoneType, LiteralType)): # Types that we can just add directly to the literal/potential union of literals. return [arg] elif isinstance(arg, Instance) and arg.last_known_value is not None: # Types generated from declarations like "var: Final = 4". return [arg.last_known_value] elif isinstance(arg, UnionType): out = [] for union_arg in arg.items: union_result = self.analyze_literal_param(idx, union_arg, ctx) if union_result is None: return None out.extend(union_result) return out else: self.fail(f"Parameter {idx} of Literal[...] is invalid", ctx, code=codes.VALID_TYPE) return None def analyze_type(self, typ: Type) -> Type: return typ.accept(self) def fail(self, msg: str, ctx: Context, *, code: ErrorCode | None = None) -> None: self.fail_func(msg, ctx, code=code) def note(self, msg: str, ctx: Context, *, code: ErrorCode | None = None) -> None: self.note_func(msg, ctx, code=code) @contextmanager def tvar_scope_frame(self, namespace: str) -> Iterator[None]: old_scope = self.tvar_scope self.tvar_scope = self.tvar_scope.method_frame(namespace) yield self.tvar_scope = old_scope def find_type_var_likes(self, t: Type) -> TypeVarLikeList: visitor = FindTypeVarVisitor(self.api, self.tvar_scope) t.accept(visitor) return visitor.type_var_likes def infer_type_variables( self, type: CallableType ) -> tuple[list[tuple[str, TypeVarLikeExpr]], bool]: """Infer type variables from a callable. Return tuple with these items: - list of unique type variables referred to in a callable - whether there is a reference to the Self type """ visitor = FindTypeVarVisitor(self.api, self.tvar_scope) for arg in type.arg_types: arg.accept(visitor) # When finding type variables in the return type of a function, don't # look inside Callable types. Type variables only appearing in # functions in the return type belong to those functions, not the # function we're currently analyzing. 
visitor.include_callables = False type.ret_type.accept(visitor) return visitor.type_var_likes, visitor.has_self_type def bind_function_type_variables( self, fun_type: CallableType, defn: Context ) -> tuple[tuple[TypeVarLikeType, ...], bool]: """Find the type variables of the function type and bind them in our tvar_scope""" has_self_type = False if fun_type.variables: defs = [] for var in fun_type.variables: if self.api.type and self.api.type.self_type and var == self.api.type.self_type: has_self_type = True continue var_node = self.lookup_qualified(var.name, defn) assert var_node, "Binding for function type variable not found within function" var_expr = var_node.node assert isinstance(var_expr, TypeVarLikeExpr) binding = self.tvar_scope.bind_new(var.name, var_expr, self.fail_func, fun_type) defs.append(binding) return tuple(defs), has_self_type typevars, has_self_type = self.infer_type_variables(fun_type) # Do not define a new type variable if already defined in scope. typevars = [ (name, tvar) for name, tvar in typevars if not self.is_defined_type_var(name, defn) ] defs = [] for name, tvar in typevars: if not self.tvar_scope.allow_binding(tvar.fullname): err_msg = message_registry.TYPE_VAR_REDECLARED_IN_NESTED_CLASS.format(name) self.fail(err_msg.value, defn, code=err_msg.code) binding = self.tvar_scope.bind_new(name, tvar, self.fail_func, fun_type) defs.append(binding) return tuple(defs), has_self_type def is_defined_type_var(self, tvar: str, context: Context) -> bool: tvar_node = self.lookup_qualified(tvar, context) if not tvar_node: return False return self.tvar_scope.get_binding(tvar_node) is not None def anal_array( self, a: Iterable[Type], nested: bool = True, *, allow_param_spec: bool = False, allow_param_spec_literals: bool = False, allow_unpack: bool = False, ) -> list[Type]: old_allow_param_spec_literals = self.allow_param_spec_literals self.allow_param_spec_literals = allow_param_spec_literals res: list[Type] = [] for t in a: res.append( self.anal_type( t, nested, allow_param_spec=allow_param_spec, allow_unpack=allow_unpack ) ) self.allow_param_spec_literals = old_allow_param_spec_literals return self.check_unpacks_in_list(res) def anal_type( self, t: Type, nested: bool = True, *, allow_param_spec: bool = False, allow_unpack: bool = False, allow_ellipsis: bool = False, allow_typed_dict_special_forms: bool = False, allow_final: bool = False, ) -> Type: if nested: self.nesting_level += 1 old_allow_typed_dict_special_forms = self.allow_typed_dict_special_forms self.allow_typed_dict_special_forms = allow_typed_dict_special_forms self.allow_final = allow_final old_allow_ellipsis = self.allow_ellipsis self.allow_ellipsis = allow_ellipsis old_allow_unpack = self.allow_unpack self.allow_unpack = allow_unpack try: analyzed = t.accept(self) finally: if nested: self.nesting_level -= 1 self.allow_typed_dict_special_forms = old_allow_typed_dict_special_forms self.allow_ellipsis = old_allow_ellipsis self.allow_unpack = old_allow_unpack if ( not allow_param_spec and isinstance(analyzed, ParamSpecType) and analyzed.flavor == ParamSpecFlavor.BARE ): if analyzed.prefix.arg_types: self.fail("Invalid location for Concatenate", t, code=codes.VALID_TYPE) self.note("You can use Concatenate as the first argument to Callable", t) analyzed = AnyType(TypeOfAny.from_error) else: self.fail( INVALID_PARAM_SPEC_LOCATION.format(format_type(analyzed, self.options)), t, code=codes.VALID_TYPE, ) self.note( INVALID_PARAM_SPEC_LOCATION_NOTE.format(analyzed.name), t, code=codes.VALID_TYPE, ) analyzed = 
AnyType(TypeOfAny.from_error) return analyzed def anal_var_def(self, var_def: TypeVarLikeType) -> TypeVarLikeType: if isinstance(var_def, TypeVarType): return TypeVarType( name=var_def.name, fullname=var_def.fullname, id=var_def.id, values=self.anal_array(var_def.values), upper_bound=var_def.upper_bound.accept(self), default=var_def.default.accept(self), variance=var_def.variance, line=var_def.line, column=var_def.column, ) else: return var_def def anal_var_defs(self, var_defs: Sequence[TypeVarLikeType]) -> list[TypeVarLikeType]: return [self.anal_var_def(vd) for vd in var_defs] def named_type( self, fullname: str, args: list[Type] | None = None, line: int = -1, column: int = -1 ) -> Instance: node = self.lookup_fully_qualified(fullname) assert isinstance(node.node, TypeInfo) any_type = AnyType(TypeOfAny.special_form) if args is not None: args = self.check_unpacks_in_list(args) return Instance( node.node, args or [any_type] * len(node.node.defn.type_vars), line=line, column=column ) def check_unpacks_in_list(self, items: list[Type]) -> list[Type]: new_items: list[Type] = [] num_unpacks = 0 final_unpack = None for item in items: # TODO: handle forward references here, they appear as Unpack[Any]. if isinstance(item, UnpackType) and not isinstance( get_proper_type(item.type), TupleType ): if not num_unpacks: new_items.append(item) num_unpacks += 1 final_unpack = item else: new_items.append(item) if num_unpacks > 1: assert final_unpack is not None self.fail("More than one variadic Unpack in a type is not allowed", final_unpack.type) return new_items def tuple_type(self, items: list[Type], line: int, column: int) -> TupleType: any_type = AnyType(TypeOfAny.special_form) return TupleType( items, fallback=self.named_type("builtins.tuple", [any_type]), line=line, column=column ) TypeVarLikeList = list[tuple[str, TypeVarLikeExpr]]
TypeAnalyser
python
Netflix__metaflow
metaflow/plugins/cards/exception.py
{ "start": 4485, "end": 5222 }
class ____(MetaflowException): headline = "Component overwrite is not supported" def __init__(self, component_id, card_id, card_type): id_str = "" if card_id is not None: id_str = "id='%s'" % card_id msg = ( "Card component overwrite is not supported. " "Component with id %s already exists in the @card(type='%s', %s). \n" "Instead of calling `current.card.components[ID] = MyComponent`. " "You can overwrite the entire component Array by calling " "`current.card.components = [MyComponent]`" ) % (component_id, card_type, id_str) super().__init__( msg=msg, )
ComponentOverwriteNotSupportedException
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/testing/suite/test_select.py
{ "start": 2520, "end": 5056 }
class ____(fixtures.TablesTest): """Test the dialect sends appropriate ORDER BY expressions when labels are used. This essentially exercises the "supports_simple_order_by_label" setting. """ __sparse_driver_backend__ = True @classmethod def define_tables(cls, metadata): Table( "some_table", metadata, Column("id", Integer, primary_key=True), Column("x", Integer), Column("y", Integer), Column("q", String(50)), Column("p", String(50)), ) @classmethod def insert_data(cls, connection): connection.execute( cls.tables.some_table.insert(), [ {"id": 1, "x": 1, "y": 2, "q": "q1", "p": "p3"}, {"id": 2, "x": 2, "y": 3, "q": "q2", "p": "p2"}, {"id": 3, "x": 3, "y": 4, "q": "q3", "p": "p1"}, ], ) def _assert_result(self, select, result): with config.db.connect() as conn: eq_(conn.execute(select).fetchall(), result) def test_plain(self): table = self.tables.some_table lx = table.c.x.label("lx") self._assert_result(select(lx).order_by(lx), [(1,), (2,), (3,)]) def test_composed_int(self): table = self.tables.some_table lx = (table.c.x + table.c.y).label("lx") self._assert_result(select(lx).order_by(lx), [(3,), (5,), (7,)]) def test_composed_multiple(self): table = self.tables.some_table lx = (table.c.x + table.c.y).label("lx") ly = (func.lower(table.c.q) + table.c.p).label("ly") self._assert_result( select(lx, ly).order_by(lx, ly.desc()), [(3, "q1p3"), (5, "q2p2"), (7, "q3p1")], ) def test_plain_desc(self): table = self.tables.some_table lx = table.c.x.label("lx") self._assert_result(select(lx).order_by(lx.desc()), [(3,), (2,), (1,)]) def test_composed_int_desc(self): table = self.tables.some_table lx = (table.c.x + table.c.y).label("lx") self._assert_result(select(lx).order_by(lx.desc()), [(7,), (5,), (3,)]) @testing.requires.group_by_complex_expression def test_group_by_composed(self): table = self.tables.some_table expr = (table.c.x + table.c.y).label("lx") stmt = ( select(func.count(table.c.id), expr).group_by(expr).order_by(expr) ) self._assert_result(stmt, [(1, 3), (1, 5), (1, 7)])
OrderByLabelTest
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/events.py
{ "start": 26075, "end": 27563 }
class ____(Exception): """Event indicating op failure. Raise events of this type from within op compute functions or custom type checks in order to indicate an unrecoverable failure in user code to the Dagster machinery and return structured metadata about the failure. Args: description (Optional[str]): A human-readable description of the failure. metadata (Optional[Dict[str, RawMetadataValue]]): Arbitrary metadata about the failure. Keys are displayed string labels, and values are one of the following: string, float, int, JSON-serializable dict, JSON-serializable list, and one of the data classes returned by a MetadataValue static method. allow_retries (Optional[bool]): Whether this Failure should respect the retry policy or bypass it and immediately fail. Defaults to True, respecting the retry policy and allowing retries. """ def __init__( self, description: Optional[str] = None, metadata: Optional[Mapping[str, RawMetadataValue]] = None, allow_retries: Optional[bool] = None, ): super().__init__(description) self.description = check.opt_str_param(description, "description") self.metadata = normalize_metadata( check.opt_mapping_param(metadata, "metadata", key_type=str), ) self.allow_retries = check.opt_bool_param(allow_retries, "allow_retries", True) @public
Failure
python
altair-viz__altair
altair/vegalite/v6/schema/_config.py
{ "start": 203720, "end": 208657 }
class ____(TypedDict, total=False): """ :class:`altair.PointSelectionConfig` ``TypedDict`` wrapper. Parameters ---------- type Determines the default event processing and data query for the selection. Vega-Lite currently supports two selection types: * ``"point"`` -- to select multiple discrete data values; the first value is selected on ``click`` and additional values toggled on shift-click. * ``"interval"`` -- to select a continuous range of data values on ``drag``. clear Clears the selection, emptying it of all values. This property can be a `Event Stream <https://vega.github.io/vega/docs/event-streams/>`__ or ``false`` to disable clear. **Default value:** ``dblclick``. **See also:** `clear examples <https://vega.github.io/vega-lite/docs/selection.html#clear>`__ in the documentation. encodings An array of encoding channels. The corresponding data field values must match for a data tuple to fall within the selection. **See also:** The `projection with encodings and fields section <https://vega.github.io/vega-lite/docs/selection.html#project>`__ in the documentation. fields An array of field names whose values must match for a data tuple to fall within the selection. **See also:** The `projection with encodings and fields section <https://vega.github.io/vega-lite/docs/selection.html#project>`__ in the documentation. nearest When true, an invisible voronoi diagram is computed to accelerate discrete selection. The data value *nearest* the mouse cursor is added to the selection. **Default value:** ``false``, which means that data values must be interacted with directly (e.g., clicked on) to be added to the selection. **See also:** `nearest examples <https://vega.github.io/vega-lite/docs/selection.html#nearest>`__ documentation. on A `Vega event stream <https://vega.github.io/vega/docs/event-streams/>`__ (object or selector) that triggers the selection. For interval selections, the event stream must specify a `start and end <https://vega.github.io/vega/docs/event-streams/#between-filters>`__. **See also:** `on examples <https://vega.github.io/vega-lite/docs/selection.html#on>`__ in the documentation. resolve With layered and multi-view displays, a strategy that determines how selections' data queries are resolved when applied in a filter transform, conditional encoding rule, or scale domain. One of: * ``"global"`` -- only one brush exists for the entire SPLOM. When the user begins to drag, any previous brushes are cleared, and a new one is constructed. * ``"union"`` -- each cell contains its own brush, and points are highlighted if they lie within *any* of these individual brushes. * ``"intersect"`` -- each cell contains its own brush, and points are highlighted only if they fall within *all* of these individual brushes. **Default value:** ``global``. **See also:** `resolve examples <https://vega.github.io/vega-lite/docs/selection.html#resolve>`__ in the documentation. toggle Controls whether data values should be toggled (inserted or removed from a point selection) or only ever inserted into point selections. One of: * ``true`` -- the default behavior, which corresponds to ``"event.shiftKey"``. As a result, data values are toggled when the user interacts with the shift-key pressed. * ``false`` -- disables toggling behaviour; the selection will only ever contain a single data value corresponding to the most recent interaction. * A `Vega expression <https://vega.github.io/vega/docs/expressions/>`__ which is re-evaluated as the user interacts. 
If the expression evaluates to ``true``, the data value is toggled into or out of the point selection. If the expression evaluates to ``false``, the point selection is first cleared, and the data value is then inserted. For example, setting the value to the Vega expression ``"true"`` will toggle data values without the user pressing the shift-key. **Default value:** ``true`` **See also:** `toggle examples <https://vega.github.io/vega-lite/docs/selection.html#toggle>`__ in the documentation. """ type: Literal["point"] clear: str | bool | MergedStreamKwds | DerivedStreamKwds encodings: Sequence[SingleDefUnitChannel_T] fields: Sequence[str] nearest: bool on: str | MergedStreamKwds | DerivedStreamKwds resolve: SelectionResolution_T toggle: str | bool
PointSelectionConfigKwds
python
bokeh__bokeh
tests/unit/bokeh/document/test_events__document.py
{ "start": 10261, "end": 12478 }
class ____: def test_init(self) -> None: doc = Document() m = SomeModel() e = bde.ColumnsStreamedEvent(doc, m, "data", dict(foo=1), 200, "setter", "invoker") assert e.document == doc assert e.model == m assert e.attr == "data" assert e.data == dict(foo=1) assert e.rollover == 200 assert e.setter == "setter" assert e.callback_invoker == "invoker" def test_kind(self) -> None: assert bde.ColumnsStreamedEvent.kind == "ColumnsStreamed" def test_to_serializable(self) -> None: doc = Document() m = SomeModel() e = bde.ColumnsStreamedEvent(doc, m, "data", dict(foo=1), 200, "setter", "invoker") s = Serializer() r = s.encode(e) assert r == dict(kind=e.kind, model=m.ref, attr="data", data=MapRep(type="map", entries=[("foo", 1)]), rollover=200) assert s.buffers == [] def test_dispatch(self) -> None: doc = Document() m = SomeModel() e = bde.ColumnsStreamedEvent(doc, m, "data", dict(foo=1), 200, "setter", "invoker") e.dispatch(FakeEmptyDispatcher()) d = FakeFullDispatcher() e.dispatch(d) assert d.called == ['_document_changed', '_document_patched', '_columns_streamed'] def test_combine_ignores_all(self) -> None: doc = Document() m = SomeModel() e = bde.ColumnsStreamedEvent(doc, m, "data", dict(foo=1), 200, "setter", "invoker") e2 = bde.ColumnsStreamedEvent(doc, m, "data", dict(foo=2), 300, "setter", "invoker") assert e.combine(e2) is False assert e.model is m assert e.attr == "data" assert e.data == dict(foo=1) assert e.rollover == 200 def test_pandas_data(self) -> None: pd = pytest.importorskip("pandas") doc = Document() m = SomeModel() df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]}) e = bde.ColumnsStreamedEvent(doc, m, "data", df, 200, "setter", "invoker") assert isinstance(e.data, dict) assert e.data == {c: df[c] for c in df.columns} # ColumnsPatchedEvent ---------------------------------------------------------
TestColumnsStreamedEvent
python
apache__airflow
providers/yandex/src/airflow/providers/yandex/operators/dataproc.py
{ "start": 19866, "end": 23017 }
class ____(DataprocBaseOperator): """ Runs Spark job in Data Proc cluster. :param main_jar_file_uri: URI of jar file with job. Can be placed in HDFS or S3. :param main_class: Name of the main class of the job. :param file_uris: URIs of files used in the job. Can be placed in HDFS or S3. :param archive_uris: URIs of archive files used in the job. Can be placed in HDFS or S3. :param jar_file_uris: URIs of JAR files used in the job. Can be placed in HDFS or S3. :param properties: Properties for the job. :param args: Arguments to be passed to the job. :param name: Name of the job. Used for labeling. :param cluster_id: ID of the cluster to run job in. Will try to take the ID from Dataproc Hook object if it's specified. (templated) :param connection_id: ID of the Yandex.Cloud Airflow connection. :param packages: List of maven coordinates of jars to include on the driver and executor classpaths. :param repositories: List of additional remote repositories to search for the maven coordinates given with --packages. :param exclude_packages: List of groupId:artifactId, to exclude while resolving the dependencies provided in --packages to avoid dependency conflicts. """ def __init__( self, *, main_class: str | None = None, main_jar_file_uri: str | None = None, jar_file_uris: Iterable[str] | None = None, archive_uris: Iterable[str] | None = None, file_uris: Iterable[str] | None = None, args: Iterable[str] | None = None, properties: dict[str, str] | None = None, name: str = "Spark job", cluster_id: str | None = None, connection_id: str | None = None, packages: Iterable[str] | None = None, repositories: Iterable[str] | None = None, exclude_packages: Iterable[str] | None = None, **kwargs, ) -> None: super().__init__(yandex_conn_id=connection_id, cluster_id=cluster_id, **kwargs) self.main_class = main_class self.main_jar_file_uri = main_jar_file_uri self.jar_file_uris = jar_file_uris self.archive_uris = archive_uris self.file_uris = file_uris self.args = args self.properties = properties self.name = name self.packages = packages self.repositories = repositories self.exclude_packages = exclude_packages def execute(self, context: Context) -> None: hook = self._setup(context) hook.dataproc_client.create_spark_job( main_class=self.main_class, main_jar_file_uri=self.main_jar_file_uri, jar_file_uris=self.jar_file_uris, archive_uris=self.archive_uris, file_uris=self.file_uris, args=self.args, properties=self.properties, packages=self.packages, repositories=self.repositories, exclude_packages=self.exclude_packages, name=self.name, cluster_id=self.cluster_id, )
DataprocCreateSparkJobOperator
python
apache__airflow
airflow-core/src/airflow/models/expandinput.py
{ "start": 5064, "end": 6543 }
class ____: value: list EXPAND_INPUT_TYPE: ClassVar[str] = "list-of-dicts" def get_parse_time_mapped_ti_count(self) -> int: if isinstance(self.value, Sized): return len(self.value) raise NotFullyPopulated({"expand_kwargs() argument"}) def get_total_map_length(self, run_id: str, *, session: Session) -> int: from airflow.models.xcom_arg import get_task_map_length if isinstance(self.value, Sized): return len(self.value) length = get_task_map_length(self.value, run_id, session=session) if length is None: raise NotFullyPopulated({"expand_kwargs() argument"}) return length def iter_references(self) -> Iterable[tuple[Operator, str]]: from airflow.models.referencemixin import ReferenceMixin if isinstance(self.value, ReferenceMixin): yield from self.value.iter_references() else: for x in self.value: if isinstance(x, ReferenceMixin): yield from x.iter_references() _EXPAND_INPUT_TYPES: dict[str, type[SchedulerExpandInput]] = { "dict-of-lists": SchedulerDictOfListsExpandInput, "list-of-dicts": SchedulerListOfDictsExpandInput, } SchedulerExpandInput = SchedulerDictOfListsExpandInput | SchedulerListOfDictsExpandInput def create_expand_input(kind: str, value: Any) -> SchedulerExpandInput: return _EXPAND_INPUT_TYPES[kind](value)
SchedulerListOfDictsExpandInput
python
realpython__materials
python-practice-problems/sudokusolve.py
{ "start": 1377, "end": 3544 }
class ____(unittest.TestCase): problems = [ "00400607900000060205609230007806103050900040602054089000741092010500" "0000840600100", "01640000020000900040000006207023010010000000300308704096000000500080" "0007000006820", "04900860500300700000000003000040080006081502000100900001000000000060" "0400804500390", "76050000000006000800000040320040080008000003000500100780900000060001" "0000000003041", "00060500000302080004509027050000000106200054040000000709806045000604" "0700000203000", "40900070500001000000620780020000000900370420080000000400280150000006" "0000905000406", "00001003004007050100200800668000000300030200030000004520050080080104" "0020090020000", "08007003026005001800000040000060200039001008600070900000400080081004" "0052050090070", "00009300600080090002000610000008005300600020037005000000250004000100" "9000700130007", ] expected = [ "28413657991375468275689234147896123553928741662154389736741592819532" "8764842679153", "31645297828567931449731856287923415614296578365318724996872143552184" "3697734596821", "14923867562395714875814623993547286146781592328136975431679458259268" "3417874521396", "76354812942136975895817246329743681518679523434582169781925437663491" "7582572683941", "82967531467312489514539827658743692196281754343195268739876145221654" "9738754283169", "41963872572851964353624789125418637919375426886792315464289153737146" "5982985372416", "76891543294327658151243879668519427317435296832968714523756981485174" "3629496821357", "48197623526745391893582146717863254939251478654678932172416589381934" "7652653298174", "Unsolvable", ] def test_solver(self): for index, problem in enumerate(self.problems): print(f"Testing puzzle {index + 1}") result = sudoku_solve(problem) self.assertEqual(result, self.expected[index]) if __name__ == "__main__": unittest.main()
SudokuSolverTestCase
python
pydantic__pydantic
tests/test_forward_ref.py
{ "start": 36042, "end": 36375 }
class ____(BaseModel): dc: DC2 """) Model = module_2.Model Model(dc=dict(a=1, b='not_an_int')) @pytest.mark.skipif(sys.version_info < (3, 12), reason='Requires PEP 695 syntax') def test_class_locals_are_kept_during_schema_generation(create_module): create_module( """ from pydantic import BaseModel
Model
python
ray-project__ray
python/ray/tune/tests/test_commands.py
{ "start": 434, "end": 6045 }
class ____: def __enter__(self): self._stdout = sys.stdout sys.stdout = self._stringio = StringIO() self.captured = [] return self def __exit__(self, *args): self.captured.extend(self._stringio.getvalue().splitlines()) del self._stringio # free up some memory sys.stdout = self._stdout @pytest.fixture def start_ray(): ray.init(log_to_driver=False, local_mode=True) yield ray.shutdown() def test_time(start_ray, tmpdir, monkeypatch): experiment_name = "test_time" num_samples = 2 def train_fn(config): for i in range(3): with create_dict_checkpoint({"dummy": "data"}) as checkpoint: ray.tune.report( { "epoch": i, "a": random.random(), "b/c": random.random(), "d": random.random(), }, checkpoint=checkpoint, ) tuner = tune.Tuner( train_fn, param_space={f"hp{i}": tune.uniform(0, 1) for i in range(100)}, tune_config=tune.TuneConfig(num_samples=num_samples), run_config=ray.tune.RunConfig(name=experiment_name), ) results = tuner.fit() times = [] for _ in range(5): start = time.time() subprocess.check_call(["tune", "ls", results.experiment_path]) times += [time.time() - start] print("Average CLI time: ", sum(times) / len(times)) assert sum(times) / len(times) < 5, "CLI is taking too long!" @mock.patch( "ray.tune.cli.commands.print_format_output", wraps=ray.tune.cli.commands.print_format_output, ) def test_ls(mock_print_format_output, start_ray, tmpdir): """This test captures output of list_trials.""" experiment_name = "test_ls" experiment_path = os.path.join(str(tmpdir), experiment_name) num_samples = 3 tune.run( MyTrainableClass, name=experiment_name, stop={"training_iteration": 1}, num_samples=num_samples, storage_path=str(tmpdir), ) columns = ["episode_reward_mean", "training_iteration", "trial_id"] limit = 2 commands.list_trials(experiment_path, info_keys=columns, limit=limit) # The dataframe that is printed as a table is the first arg of the last # call made to `ray.tune.cli.commands.print_format_output`. mock_print_format_output.assert_called() args, _ = mock_print_format_output.call_args_list[-1] df = args[0] assert sorted(df.columns.to_list()) == sorted(columns), df assert len(df.index) == limit, df commands.list_trials( experiment_path, sort=["trial_id"], info_keys=("trial_id", "training_iteration"), filter_op="training_iteration == 1", ) args, _ = mock_print_format_output.call_args_list[-1] df = args[0] assert sorted(df.columns.to_list()) == sorted(["trial_id", "training_iteration"]) assert len(df.index) == num_samples with pytest.raises(click.ClickException): commands.list_trials( experiment_path, sort=["trial_id"], info_keys=("training_iteration",) ) with pytest.raises(click.ClickException): commands.list_trials(experiment_path, info_keys=("asdf",)) @mock.patch( "ray.tune.cli.commands.print_format_output", wraps=ray.tune.cli.commands.print_format_output, ) def test_ls_with_cfg(mock_print_format_output, start_ray, tmpdir): experiment_name = "test_ls_with_cfg" experiment_path = os.path.join(str(tmpdir), experiment_name) tune.run( MyTrainableClass, name=experiment_name, stop={"training_iteration": 1}, config={"test_variable": tune.grid_search(list(range(5)))}, storage_path=str(tmpdir), ) columns = [CONFIG_PREFIX + "/test_variable", "trial_id"] limit = 4 commands.list_trials(experiment_path, info_keys=columns, limit=limit) # The dataframe that is printed as a table is the first arg of the last # call made to `ray.tune.cli.commands.print_format_output`. 
mock_print_format_output.assert_called() args, _ = mock_print_format_output.call_args_list[-1] df = args[0] assert sorted(df.columns.to_list()) == sorted(columns), df assert len(df.index) == limit, df def test_lsx(start_ray, tmpdir): """This test captures output of list_experiments.""" project_path = str(tmpdir) num_experiments = 3 for i in range(num_experiments): experiment_name = "test_lsx{}".format(i) tune.run( MyTrainableClass, name=experiment_name, stop={"training_iteration": 1}, num_samples=1, storage_path=project_path, ) limit = 2 with Capturing() as output: commands.list_experiments( project_path, info_keys=("total_trials",), limit=limit ) lines = output.captured assert "total_trials" in lines[1] assert lines[1].count("|") == 2 assert len(lines) == 3 + limit + 1 with Capturing() as output: commands.list_experiments( project_path, sort=["total_trials"], info_keys=("total_trials",), filter_op="total_trials == 1", ) lines = output.captured assert sum("1" in line for line in lines) >= num_experiments assert len(lines) == 3 + num_experiments + 1 if __name__ == "__main__": # Make click happy in bazel. os.environ["LC_ALL"] = "en_US.UTF-8" os.environ["LANG"] = "en_US.UTF-8" sys.exit(pytest.main([__file__]))
Capturing
python
walkccc__LeetCode
solutions/880. Decoded String at Index/880.py
{ "start": 0, "end": 340 }
class ____: def decodeAtIndex(self, s: str, k: int) -> str: size = 0 for c in s: if c.isdigit(): size *= int(c) else: size += 1 for c in reversed(s): k %= size if k == 0 and c.isalpha(): return c if c.isdigit(): size //= int(c) else: size -= 1
Solution
python
doocs__leetcode
solution/0000-0099/0089.Gray Code/Solution.py
{ "start": 0, "end": 114 }
class ____: def grayCode(self, n: int) -> List[int]: return [i ^ (i >> 1) for i in range(1 << n)]
Solution
python
django__django
tests/defer/tests.py
{ "start": 16347, "end": 17250 }
class ____(TestCase): @classmethod def setUpTestData(cls): cls.secondary = Secondary.objects.create(first="a", second="b") cls.primary = PrimaryOneToOne.objects.create( name="Bella", value="Baxter", related=cls.secondary ) def test_defer_not_clear_cached_relations(self): obj = Secondary.objects.defer("first").get(pk=self.secondary.pk) with self.assertNumQueries(1): obj.primary_o2o obj.first # Accessing a deferred field. with self.assertNumQueries(0): obj.primary_o2o def test_only_not_clear_cached_relations(self): obj = Secondary.objects.only("first").get(pk=self.secondary.pk) with self.assertNumQueries(1): obj.primary_o2o obj.second # Accessing a deferred field. with self.assertNumQueries(0): obj.primary_o2o
DeferredRelationTests
python
coleifer__peewee
tests/postgres.py
{ "start": 4282, "end": 9536 }
class ____(ModelTestCase): database = db_loader('postgres', db_class=PostgresqlExtDatabase, register_hstore=True) requires = [HStoreModel] def setUp(self): super(TestHStoreField, self).setUp() self.t1 = HStoreModel.create(name='t1', data={'k1': 'v1', 'k2': 'v2'}) self.t2 = HStoreModel.create(name='t2', data={'k2': 'v2', 'k3': 'v3'}) def by_name(self, name): return HStoreModel.get(HStoreModel.name == name).data def test_hstore_storage(self): self.assertEqual(self.by_name('t1'), {'k1': 'v1', 'k2': 'v2'}) self.assertEqual(self.by_name('t2'), {'k2': 'v2', 'k3': 'v3'}) self.t1.data = {'k4': 'v4'} self.t1.save() self.assertEqual(self.by_name('t1'), {'k4': 'v4'}) HStoreModel.create(name='t3', data={}) self.assertEqual(self.by_name('t3'), {}) def query(self, *cols): return (HStoreModel .select(HStoreModel.name, *cols) .order_by(HStoreModel.id)) def test_hstore_selecting(self): query = self.query(D.keys().alias('keys')) self.assertEqual([(x.name, sorted(x.keys)) for x in query], [ ('t1', ['k1', 'k2']), ('t2', ['k2', 'k3'])]) query = self.query(D.values().alias('vals')) self.assertEqual([(x.name, sorted(x.vals)) for x in query], [ ('t1', ['v1', 'v2']), ('t2', ['v2', 'v3'])]) query = self.query(D.items().alias('mtx')) self.assertEqual([(x.name, sorted(x.mtx)) for x in query], [ ('t1', [['k1', 'v1'], ['k2', 'v2']]), ('t2', [['k2', 'v2'], ['k3', 'v3']])]) query = self.query(D.slice('k2', 'k3').alias('kz')) self.assertEqual([(x.name, x.kz) for x in query], [ ('t1', {'k2': 'v2'}), ('t2', {'k2': 'v2', 'k3': 'v3'})]) query = self.query(D.slice('k4').alias('kz')) self.assertEqual([(x.name, x.kz) for x in query], [ ('t1', {}), ('t2', {})]) query = self.query(D.exists('k3').alias('ke')) self.assertEqual([(x.name, x.ke) for x in query], [ ('t1', False), ('t2', True)]) query = self.query(D.defined('k3').alias('ke')) self.assertEqual([(x.name, x.ke) for x in query], [ ('t1', False), ('t2', True)]) query = self.query(D['k1'].alias('k1')) self.assertEqual([(x.name, x.k1) for x in query], [ ('t1', 'v1'), ('t2', None)]) query = self.query().where(D['k1'] == 'v1') self.assertEqual([x.name for x in query], ['t1']) def assertWhere(self, expr, names): query = HStoreModel.select().where(expr) self.assertEqual([x.name for x in query], names) def test_hstore_filtering(self): self.assertWhere(D == {'k1': 'v1', 'k2': 'v2'}, ['t1']) self.assertWhere(D == {'k2': 'v2'}, []) self.assertWhere(D.contains('k3'), ['t2']) self.assertWhere(D.contains(['k2', 'k3']), ['t2']) self.assertWhere(D.contains(['k2']), ['t1', 't2']) # test dict self.assertWhere(D.contains({'k2': 'v2', 'k3': 'v3'}), ['t2']) self.assertWhere(D.contains({'k2': 'v2'}), ['t1', 't2']) self.assertWhere(D.contains({'k2': 'v3'}), []) # test contains any. 
self.assertWhere(D.contains_any('k3', 'kx'), ['t2']) self.assertWhere(D.contains_any('k2', 'x', 'k3'), ['t1', 't2']) self.assertWhere(D.contains_any('x', 'kx', 'y'), []) def test_hstore_filter_functions(self): self.assertWhere(D.exists('k2') == True, ['t1', 't2']) self.assertWhere(D.exists('k3') == True, ['t2']) self.assertWhere(D.defined('k2') == True, ['t1', 't2']) self.assertWhere(D.defined('k3') == True, ['t2']) def test_hstore_update(self): rc = (HStoreModel .update(data=D.update(k4='v4')) .where(HStoreModel.name == 't1') .execute()) self.assertTrue(rc > 0) self.assertEqual(self.by_name('t1'), {'k1': 'v1', 'k2': 'v2', 'k4': 'v4'}) rc = (HStoreModel .update(data=D.update(k5='v5', k6='v6')) .where(HStoreModel.name == 't2') .execute()) self.assertTrue(rc > 0) self.assertEqual(self.by_name('t2'), {'k2': 'v2', 'k3': 'v3', 'k5': 'v5', 'k6': 'v6'}) HStoreModel.update(data=D.update(k2='vxxx')).execute() self.assertEqual([x.data for x in self.query(D)], [ {'k1': 'v1', 'k2': 'vxxx', 'k4': 'v4'}, {'k2': 'vxxx', 'k3': 'v3', 'k5': 'v5', 'k6': 'v6'}]) (HStoreModel .update(data=D.delete('k4')) .where(HStoreModel.name == 't1') .execute()) self.assertEqual(self.by_name('t1'), {'k1': 'v1', 'k2': 'vxxx'}) HStoreModel.update(data=D.delete('k5')).execute() self.assertEqual([x.data for x in self.query(D)], [ {'k1': 'v1', 'k2': 'vxxx'}, {'k2': 'vxxx', 'k3': 'v3', 'k6': 'v6'} ]) HStoreModel.update(data=D.delete('k1', 'k2')).execute() self.assertEqual([x.data for x in self.query(D)], [ {}, {'k3': 'v3', 'k6': 'v6'}])
TestHStoreField
python
pytest-dev__pytest
src/_pytest/warning_types.py
{ "start": 2883, "end": 3345 }
class ____(Generic[_W]): """A warning meant to be formatted during runtime. This is used to hold warnings that need to format their message at runtime, as opposed to a direct message. """ category: type[_W] template: str def format(self, **kwargs: Any) -> _W: """Return an instance of the warning category, formatted with given kwargs.""" return self.category(self.template.format(**kwargs)) @final
UnformattedWarning
python
python-pillow__Pillow
src/PIL/BlpImagePlugin.py
{ "start": 12384, "end": 14293 }
class ____(_BLPBaseDecoder): def _load(self) -> None: self._compression, self._encoding, alpha, self._alpha_encoding = self.args palette = self._read_palette() assert self.fd is not None self.fd.seek(self._offsets[0]) if self._compression == 1: # Uncompressed or DirectX compression if self._encoding == Encoding.UNCOMPRESSED: data = self._read_bgra(palette, alpha) elif self._encoding == Encoding.DXT: data = bytearray() if self._alpha_encoding == AlphaEncoding.DXT1: linesize = (self.state.xsize + 3) // 4 * 8 for yb in range((self.state.ysize + 3) // 4): for d in decode_dxt1(self._safe_read(linesize), alpha): data += d elif self._alpha_encoding == AlphaEncoding.DXT3: linesize = (self.state.xsize + 3) // 4 * 16 for yb in range((self.state.ysize + 3) // 4): for d in decode_dxt3(self._safe_read(linesize)): data += d elif self._alpha_encoding == AlphaEncoding.DXT5: linesize = (self.state.xsize + 3) // 4 * 16 for yb in range((self.state.ysize + 3) // 4): for d in decode_dxt5(self._safe_read(linesize)): data += d else: msg = f"Unsupported alpha encoding {repr(self._alpha_encoding)}" raise BLPFormatError(msg) else: msg = f"Unknown BLP encoding {repr(self._encoding)}" raise BLPFormatError(msg) else: msg = f"Unknown BLP compression {repr(self._compression)}" raise BLPFormatError(msg) self.set_as_raw(data)
BLP2Decoder
python
gevent__gevent
src/gevent/tests/test__core_timer.py
{ "start": 1927, "end": 2381 }
class ____(Test): repeat = 1 def test_main(self): # Again works for a new timer x = self.timer x.again(self.f, x) self.assertTimerInKeepalive() self.assertEqual(x.args, (x,)) # XXX: On libev, this takes 1 second. On libuv, # it takes the expected time. self.loop.run() self.assertEqual(self.called, [1]) x.stop() self.assertTimerNotInKeepalive()
TestAgain
python
huggingface__transformers
src/transformers/models/cwm/modular_cwm.py
{ "start": 8666, "end": 9166 }
class ____(Qwen2Attention): def __init__(self, config: CwmConfig, layer_idx: int): super().__init__(config=config, layer_idx=layer_idx) self.q_proj = torch.nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False) self.k_proj = torch.nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False) self.v_proj = torch.nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
CwmAttention
python
huggingface__transformers
src/transformers/models/xcodec/modeling_xcodec.py
{ "start": 12190, "end": 17123 }
class ____(PreTrainedAudioTokenizerBase): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = XcodecConfig base_model_prefix = "xcodec" main_input_name = "input_values" input_modalities = "audio" @torch.no_grad() def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) if module.bias is not None: init.zeros_(module.bias) elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): init.zeros_(module.bias) init.ones_(module.weight) elif isinstance(module, nn.Conv1d): init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) init.uniform_(module.bias, a=-k, b=k) elif module.__class__.__name__ == "Snake1d": init.ones_(module.alpha) elif isinstance(module, nn.ConvTranspose1d): module.reset_parameters() elif isinstance(module, nn.Embedding): init.normal_(module.weight, mean=0.0, std=0.02) elif isinstance(module, XcodecModel): # The conv1d are not handled correctly, as `self.acoustic_encoder/decoder` are initialized from a PreTrainedModel, # but then only the submodules are used (which are not PreTrainedModels...) -> here we reinit them as in DacModel for submodule in module.acoustic_encoder.modules(): if isinstance(submodule, nn.Conv1d): init.trunc_normal_(submodule.weight, std=0.02) init.constant_(submodule.bias, 0) for submodule in module.acoustic_decoder.modules(): if isinstance(submodule, nn.Conv1d): init.trunc_normal_(submodule.weight, std=0.02) init.constant_(submodule.bias, 0) def apply_weight_norm(self): """Apply weight norm in the acoustic encoder and decoder because the original checkpoint has weight norm applied.""" weight_norm = torch.nn.utils.weight_norm if hasattr(torch.nn.utils.parametrizations, "weight_norm"): weight_norm = torch.nn.utils.parametrizations.weight_norm weight_norm(self.acoustic_encoder.conv1) weight_norm(self.acoustic_encoder.conv2) for block in self.acoustic_encoder.block: weight_norm(block.conv1) for res_unit in (block.res_unit1, block.res_unit2, block.res_unit3): weight_norm(res_unit.conv1) weight_norm(res_unit.conv2) weight_norm(self.acoustic_decoder.conv1, name="weight") weight_norm(self.acoustic_decoder.conv2, name="weight") for block in self.acoustic_decoder.block: weight_norm(block.conv_t1, name="weight") for res_unit in (block.res_unit1, block.res_unit2, block.res_unit3): weight_norm(res_unit.conv1, name="weight") weight_norm(res_unit.conv2, name="weight") def remove_weight_norm(self): """Remove the weight norm from the acoustic encoder and decoder.""" for module in (self.acoustic_encoder, self.acoustic_decoder): for m in module.modules(): try: torch.nn.utils.remove_weight_norm(m, name="weight") except (ValueError, AttributeError): pass if hasattr(m, "parametrizations") and "weight" in m.parametrizations: torch.nn.utils.parametrize.remove_parametrizations(m, "weight", leave_parametrized=True) @lru_cache def _get_conv1d_layers(self, module): """ Recursively iterate to fetch all Conv1d layers. 
""" def get_conv1d_layers_recursive(module: nn.Module): params_list = [] if isinstance(module, nn.Conv1d): params_list.append(module) # Recursively check all child modules for child in module.children(): params_list.extend(get_conv1d_layers_recursive(child)) return params_list return tuple(get_conv1d_layers_recursive(module)) def _get_conv1d_output_lengths(self, input_length, module=None): """ For a given module, compute the output length that would be obtained after all Conv1d layers. """ if module is None: module = self conv1d_layers = self._get_conv1d_layers(module) for layer in conv1d_layers: input_length = conv1d_output_length(layer, input_length) return input_length @auto_docstring(custom_intro="""The Xcodec neural audio codec model.""")
XcodecPreTrainedModel
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pycodestyle/E30.py
{ "start": 2870, "end": 3291 }
class ____(Protocol): @property def f(self) -> int: ... @property def g(self) -> str: ... # end # no error def f( a, ): pass # end # no error if True: class Class: """Docstring""" def function(self): ... # end # no error if True: def function(self): ... # end # no error @decorator # comment @decorator def function(): pass # end # no error
C
python
plotly__plotly.py
plotly/graph_objs/box/selected/_marker.py
{ "start": 233, "end": 3564 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "box.selected" _path_str = "box.selected.marker" _valid_props = {"color", "opacity", "size"} @property def color(self): """ Sets the marker color of selected points. The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def opacity(self): """ Sets the marker opacity of selected points. The 'opacity' property is a number and may be specified as: - An int or float in the interval [0, 1] Returns ------- int|float """ return self["opacity"] @opacity.setter def opacity(self, val): self["opacity"] = val @property def size(self): """ Sets the marker size of selected points. The 'size' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["size"] @size.setter def size(self, val): self["size"] = val @property def _prop_descriptions(self): return """\ color Sets the marker color of selected points. opacity Sets the marker opacity of selected points. size Sets the marker size of selected points. """ def __init__(self, arg=None, color=None, opacity=None, size=None, **kwargs): """ Construct a new Marker object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.box.selected.Marker` color Sets the marker color of selected points. opacity Sets the marker opacity of selected points. size Sets the marker size of selected points. Returns ------- Marker """ super().__init__("marker") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.box.selected.Marker constructor must be a dict or an instance of :class:`plotly.graph_objs.box.selected.Marker`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("color", arg, color) self._set_property("opacity", arg, opacity) self._set_property("size", arg, size) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Marker
python
getsentry__sentry
src/sentry/sentry_metrics/querying/data/query.py
{ "start": 436, "end": 2851 }
class ____: """ Represents an MQL query that can be run against Snuba. Example: # Defining a simple query. query = MQLQuery("avg(d:transactions/duration@millisecond)", order=QueryOrder.ASC, limit=10) # Defining more complex queries that depend on each other. query_1 = MQLQuery("avg(d:transactions/duration@millisecond)") query_2 = MQLQuery("sum(d:transactions/duration@millisecond)") query = MQLQuery("$query_1 / $query_2", order=QueryOrder.ASC, query_1=query_1, query_2=query_2) """ def __init__( self, mql: str, order: QueryOrder | None = None, limit: int | None = None, **sub_queries ): self.mql = mql self.order = order self.limit = limit self.sub_queries = self._validate_sub_queries(sub_queries) @staticmethod def _validate_sub_queries(sub_queries: Mapping[str, Any]) -> Mapping[str, "MQLQuery"]: for name, query in sub_queries.items(): if not isinstance(query, MQLQuery): raise InvalidMetricsQueryError("A subquery must be an instance of 'MQLQuery'") return cast(Mapping[str, MQLQuery], sub_queries) def compile(self) -> "MQLQuery": """ Compiles the MQL query by replacing all variables inside the formulas with the corresponding queries. For example, a formula in the form "$a + $b" with queries "a: max(mri_1), b: min(mri_2)" will become "max(mri_1) + min(mri_2)". The rationale for having queries being defined as variables in formulas is to have a structure which is more flexible and allows reuse of the same query across multiple formulas. Returns: A new MQLQuery with the MQL string containing the replaced formula. """ sub_queries = {name: query.compile() for name, query in self.sub_queries.items()} replaced_mql_formula = self.mql # We sort query names by length and content with the goal of trying to always match the longest queries first. sorted_query_names = sorted(sub_queries.keys(), key=lambda q: (len(q), q), reverse=True) for query_name in sorted_query_names: replaced_mql_formula = replaced_mql_formula.replace( f"${query_name}", sub_queries[query_name].mql ) return MQLQuery(mql=replaced_mql_formula, order=self.order, limit=self.limit) @dataclass(frozen=True)
MQLQuery
python
tensorflow__tensorflow
tensorflow/python/debug/lib/debug_events_reader.py
{ "start": 26496, "end": 31673 }
class ____(GraphExecutionTraceDigest): """Detailed data object describing an intra-graph tensor execution. Attributes (in addition to GraphExecutionTraceDigest): graph_ids: The debugger-generated IDs of the graphs that enclose the executed op (tensor), ordered from the outermost to the innermost. graph_id: The debugger-generated ID of the innermost (immediately-enclosing) graph. tensor_debug_mode: TensorDebugMode enum value. debug_tensor_value: Debug tensor values (only for non-FULL_TENSOR tensor_debug_mode). A list of numbers. See the documentation of the TensorDebugModes for the semantics of the numbers. device_name: Device on which the tensor resides (if available) """ def __init__(self, graph_execution_trace_digest, graph_ids, tensor_debug_mode, debug_tensor_value=None, device_name=None): super().__init__(graph_execution_trace_digest.wall_time, graph_execution_trace_digest.locator, graph_execution_trace_digest.op_type, graph_execution_trace_digest.op_name, graph_execution_trace_digest.output_slot, graph_execution_trace_digest.graph_id) self._graph_ids = tuple(graph_ids) self._tensor_debug_mode = tensor_debug_mode self._debug_tensor_value = debug_tensor_value self._device_name = device_name @property def graph_ids(self): return self._graph_ids @property def graph_id(self): return self._graph_ids[-1] @property def tensor_debug_mode(self): return self._tensor_debug_mode @property def debug_tensor_value(self): return _tuple_or_none(self._debug_tensor_value) @property def device_name(self): return self._device_name def to_json(self): output = super().to_json() output.update({ "graph_ids": self.graph_ids, "tensor_debug_mode": self.tensor_debug_mode, "debug_tensor_value": self.debug_tensor_value, "device_name": self.device_name, }) return output def _parse_tensor_value(tensor_proto, return_list=False): """Helper method for reading a tensor value from a tensor proto. The rationale for the distinction between `True` and `False value of `return_list` is as follows: - `return_list=True` is used for TensorDebugMode values other than FULL_TENSOR, e.g., CONCISE_HEALTH, SHAPE and FULL_HEATLH. Under those modes, the value is guaranteed (by contract) to be a 1D float64 tensor. - `return_list=False` is used for the FULL_HEALTH TensorDebugMode specifically. Instead, we use `numpy.ndarray` to maximally preserve the shape, dtype and value information regarding the underlying tensor value. Under that mode, we don't use a python list to represent the tensor value because that can lead to loss of information (e.g., both float16 and float32 dtypes get mapped to Python floats). Args: tensor_proto: The TensorProto instance from which the tensor value will be loaded. return_list: Whether the return value will be a nested Python list that comes out from `numpy.ndarray.tolist()`. Returns: If parsing is successful, the tensor value as a `numpy.ndarray` or the nested Python list converted from it. If parsing fails, `None`. """ try: ndarray = tensor_util.MakeNdarray(tensor_proto) return ndarray.tolist() if return_list else ndarray except TypeError: # Depending on tensor_debug_mode, certain dtype of tensors don't # have logged debug tensor values. 
return None def _execution_digest_from_debug_event_proto(debug_event, locator): """Convert a DebugEvent proto into an ExecutionDigest data object.""" return ExecutionDigest( debug_event.wall_time, locator, debug_event.execution.op_type, output_tensor_device_ids=(debug_event.execution.output_tensor_device_ids or None)) def _execution_from_debug_event_proto(debug_event, locator): """Convert a DebugEvent proto into an Execution data object.""" execution_proto = debug_event.execution debug_tensor_values = None if (execution_proto.tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR): pass # TODO(cais): Build tensor store. elif (execution_proto.tensor_debug_mode != debug_event_pb2.TensorDebugMode.NO_TENSOR): debug_tensor_values = [] for tensor_proto in execution_proto.tensor_protos: # TODO(cais): Refactor into a helper method. debug_tensor_values.append( _parse_tensor_value(tensor_proto, return_list=True)) return Execution( _execution_digest_from_debug_event_proto(debug_event, locator), execution_proto.code_location.host_name, tuple(execution_proto.code_location.stack_frame_ids), execution_proto.tensor_debug_mode, graph_id=execution_proto.graph_id, input_tensor_ids=tuple(execution_proto.input_tensor_ids), output_tensor_ids=tuple(execution_proto.output_tensor_ids), debug_tensor_values=_tuple_or_none(debug_tensor_values))
GraphExecutionTrace
python
wandb__wandb
wandb/vendor/pygments/lexers/templates.py
{ "start": 34169, "end": 34687 }
class ____(DelegatingLexer): """ A lexer that highlights CSS definitions in genshi text templates. """ name = 'CSS+Genshi Text' aliases = ['css+genshitext', 'css+genshi'] alias_filenames = ['*.css'] mimetypes = ['text/css+genshi'] def __init__(self, **options): super(CssGenshiLexer, self).__init__(CssLexer, GenshiTextLexer, **options) def analyse_text(text): return GenshiLexer.analyse_text(text) - 0.05
CssGenshiLexer
python
celery__celery
t/unit/utils/test_threads.py
{ "start": 1092, "end": 1492 }
class ____: def test_stack(self): x = _LocalStack() assert x.pop() is None x.__release_local__() ident = x.__ident_func__ x.__ident_func__ = ident with pytest.raises(RuntimeError): x()[0] x.push(['foo']) assert x()[0] == 'foo' x.pop() with pytest.raises(RuntimeError): x()[0]
test_LocalStack
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pep8_naming/N803.py
{ "start": 40, "end": 180 }
class ____: def method(self, _, a, A): return _, a, A def func(_, setUp): return _, setUp from typing import override
Class
python
google__jax
jax/_src/interpreters/pxla.py
{ "start": 50180, "end": 51032 }
class ____: __slots__ = ("handler", "in_shardings", "in_layouts", "local_devices", "input_indices") def __init__(self, in_shardings, in_layouts, local_devices=None, input_indices=None): self.handler = partial( shard_args, in_shardings, in_layouts, [xc.ArrayCopySemantics.REUSE_INPUT] * len(in_shardings)) self.in_shardings = in_shardings self.in_layouts = in_layouts self.local_devices = local_devices self.input_indices = input_indices def __call__(self, input_buffers): return self.handler(input_buffers) def __str__(self): return ("InputsHandler(\n" f"in_shardings={self.in_shardings},\n" f"in_layouts={self.in_layouts},\n" f"local_devices={self.local_devices},\n" f"input_indices={self.input_indices})")
InputsHandler
python
huggingface__transformers
tests/models/dac/test_feature_extraction_dac.py
{ "start": 3340, "end": 8903 }
class ____(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = DacFeatureExtractor def setUp(self): self.feat_extract_tester = DacFeatureExtractionTester(self) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 audio_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_audio_inputs = [np.asarray(audio_input) for audio_input in audio_inputs] # Test not batched input encoded_sequences_1 = feat_extract(audio_inputs[0], return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_audio_inputs[0], return_tensors="np").input_values self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feat_extract(audio_inputs, padding=True, return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_audio_inputs, padding=True, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_double_precision_pad(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_audio_inputs = np.random.rand(100).astype(np.float64) py_audio_inputs = np_audio_inputs.tolist() for inputs in [py_audio_inputs, np_audio_inputs]: np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np") self.assertTrue(np_processed.input_values.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_values.dtype == torch.float32) def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech audio_samples = ds.sort("id")[:num_samples]["audio"] return [x["array"] for x in audio_samples] def test_integration(self): # fmt: off EXPECTED_INPUT_VALUES = torch.tensor( [ 2.3803711e-03, 2.0751953e-03, 1.9836426e-03, 2.1057129e-03, 1.6174316e-03, 3.0517578e-04, 9.1552734e-05, 3.3569336e-04, 9.7656250e-04, 1.8310547e-03, 2.0141602e-03, 2.1057129e-03, 1.7395020e-03, 4.5776367e-04, -3.9672852e-04, 4.5776367e-04, 1.0070801e-03, 9.1552734e-05, 4.8828125e-04, 1.1596680e-03, 7.3242188e-04, 9.4604492e-04, 1.8005371e-03, 1.8310547e-03, 8.8500977e-04, 4.2724609e-04, 4.8828125e-04, 7.3242188e-04, 1.0986328e-03, 2.1057129e-03] ) # fmt: on input_audio = self._load_datasamples(1) feature_extractor = DacFeatureExtractor() input_values = feature_extractor(input_audio, return_tensors="pt")["input_values"] self.assertEqual(input_values.shape, (1, 1, 93696)) torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4) audio_input_end = torch.tensor(input_audio[0][-30:], dtype=torch.float32) torch.testing.assert_close(input_values[0, 0, -46:-16], audio_input_end, rtol=1e-4, atol=1e-4) # Ignore copy @unittest.skip("The DAC model doesn't support stereo logic") def test_integration_stereo(self): pass # Ignore copy def test_truncation_and_padding(self): input_audio = self._load_datasamples(2) # would be easier if the stride was like feature_extractor = DacFeatureExtractor() # pad and trunc raise an error ? with self.assertRaisesRegex( ValueError, "^Both padding and truncation were set. 
Make sure you only set one.$", ): truncated_outputs = feature_extractor( input_audio, padding="max_length", truncation=True, return_tensors="pt" ).input_values # force truncate to max_length truncated_outputs = feature_extractor( input_audio, truncation=True, max_length=48000, return_tensors="pt" ).input_values self.assertEqual(truncated_outputs.shape, (2, 1, 48128)) # pad: padded_outputs = feature_extractor(input_audio, padding=True, return_tensors="pt").input_values self.assertEqual(padded_outputs.shape, (2, 1, 93696)) # force pad to max length truncated_outputs = feature_extractor( input_audio, padding="max_length", max_length=100000, return_tensors="pt" ).input_values self.assertEqual(truncated_outputs.shape, (2, 1, 100352)) # force no pad with self.assertRaisesRegex( ValueError, "^Unable to create tensor, you should probably activate padding with 'padding=True' to have batched tensors with the same length.$", ): truncated_outputs = feature_extractor(input_audio, padding=False, return_tensors="pt").input_values truncated_outputs = feature_extractor(input_audio[0], padding=False, return_tensors="pt").input_values self.assertEqual(truncated_outputs.shape, (1, 1, 93680))
DacFeatureExtractionTest
python
mlflow__mlflow
mlflow/webhooks/types.py
{ "start": 8599, "end": 9245 }
class ____(TypedDict): """Payload sent when a tag is deleted from a prompt version. Example payload: .. code-block:: python { "name": "example_prompt", "version": "1", "key": "example_key", } """ name: str """The name of the prompt.""" version: str """The version of the prompt.""" key: str """The tag key being deleted.""" @classmethod def example(cls) -> "PromptVersionTagDeletedPayload": return cls( name="example_prompt", version="1", key="example_key", )
PromptVersionTagDeletedPayload
python
huggingface__transformers
tests/models/dpr/test_tokenization_dpr.py
{ "start": 1637, "end": 3794 }
class ____(test_tokenization_bert.BertTokenizationTest): tokenizer_class = DPRReaderTokenizer rust_tokenizer_class = DPRReaderTokenizerFast test_rust_tokenizer = True test_seq2seq = False from_pretrained_id = "facebook/dpr-ctx_encoder-single-nq-base" @slow def test_decode_best_spans(self): tokenizer = self.tokenizer_class.from_pretrained("google-bert/bert-base-uncased") text_1 = tokenizer.encode("question sequence", add_special_tokens=False) text_2 = tokenizer.encode("title sequence", add_special_tokens=False) text_3 = tokenizer.encode("text sequence " * 4, add_special_tokens=False) input_ids = [[101] + text_1 + [102] + text_2 + [102] + text_3] reader_input = BatchEncoding({"input_ids": input_ids}) start_logits = [[0] * len(input_ids[0])] end_logits = [[0] * len(input_ids[0])] relevance_logits = [0] reader_output = DPRReaderOutput(start_logits, end_logits, relevance_logits) start_index, end_index = 8, 9 start_logits[0][start_index] = 10 end_logits[0][end_index] = 10 predicted_spans = tokenizer.decode_best_spans(reader_input, reader_output) self.assertEqual(predicted_spans[0].start_index, start_index) self.assertEqual(predicted_spans[0].end_index, end_index) self.assertEqual(predicted_spans[0].doc_id, 0) @slow def test_call(self): tokenizer = self.tokenizer_class.from_pretrained("google-bert/bert-base-uncased") text_1 = tokenizer.encode("question sequence", add_special_tokens=False) text_2 = tokenizer.encode("title sequence", add_special_tokens=False) text_3 = tokenizer.encode("text sequence", add_special_tokens=False) expected_input_ids = [101] + text_1 + [102] + text_2 + [102] + text_3 encoded_input = tokenizer(questions=["question sequence"], titles=["title sequence"], texts=["text sequence"]) self.assertIn("input_ids", encoded_input) self.assertIn("attention_mask", encoded_input) self.assertListEqual(encoded_input["input_ids"][0], expected_input_ids)
DPRReaderTokenizationTest
python
getsentry__sentry
src/sentry/api/serializers/models/organization.py
{ "start": 20283, "end": 21770 }
class ____(_DetailedOrganizationSerializerResponseOptional): experiments: Any isDefault: bool defaultRole: str # TODO: replace with enum/literal availableRoles: list[Any] # TODO: deprecated, use orgRoleList orgRoleList: list[OrganizationRoleSerializerResponse] teamRoleList: list[TeamRoleSerializerResponse] openMembership: bool allowSharedIssues: bool enhancedPrivacy: bool dataScrubber: bool dataScrubberDefaults: bool sensitiveFields: list[str] safeFields: list[str] storeCrashReports: int attachmentsRole: str # TODO: replace with enum/literal debugFilesRole: str # TODO: replace with enum/literal eventsMemberAdmin: bool alertsMemberWrite: bool scrubIPAddresses: bool scrapeJavaScript: bool allowJoinRequests: bool relayPiiConfig: str | None trustedRelays: list[TrustedRelaySerializerResponse] pendingAccessRequests: int codecovAccess: bool hideAiFeatures: bool githubPRBot: bool githubNudgeInvite: bool gitlabPRBot: bool aggregatedDataConsent: bool genAIConsent: bool isDynamicallySampled: bool issueAlertsThreadFlag: bool metricAlertsThreadFlag: bool requiresSso: bool rollbackEnabled: bool streamlineOnly: bool defaultAutofixAutomationTuning: str defaultSeerScannerAutomation: bool enablePrReviewTestGeneration: bool enableSeerEnhancedAlerts: bool enableSeerCoding: bool
DetailedOrganizationSerializerResponse
python
getsentry__sentry
src/sentry/integrations/github/types.py
{ "start": 178, "end": 435 }
class ____(StrEnum): OPEN = "open" CLOSED = "closed" @classmethod def get_choices(cls): """Return choices formatted for dropdown selectors""" return [(status.value, status.value.capitalize()) for status in cls]
GitHubIssueStatus
python
readthedocs__readthedocs.org
readthedocs/projects/views/private.py
{ "start": 39383, "end": 39507 }
class ____(RegexAutomationRuleMixin, CreateView): success_message = _("Automation rule created")
RegexAutomationRuleCreate
python
prabhupant__python-ds
data_structures/bst/insertion_iterative.py
{ "start": 0, "end": 862 }
class ____(): def __init__(self, val): self.val = val self.left = None self.right = None def insert(root, val): new_node = Node(val) parent = None curr = root while curr: parent = curr if curr.val <= val: curr = curr.right else: curr = curr.left if parent.val <= val: parent.right = new_node else: parent.left = new_node def inorder(root): if not root: return None stack = [] while True: if root: stack.append(root) root = root.left else: if not stack: break root = stack.pop() print(root.val, end=" ") root = root.right root = Node(4) insert(root, 2) insert(root, 6) insert(root, 1) insert(root, 8) inorder(root)
Node
python
keras-team__keras
integration_tests/basic_full_flow.py
{ "start": 684, "end": 1582 }
class ____(testing.TestCase): @pytest.mark.requires_trainable_backend def test_basic_fit(self): model = MyModel(hidden_dim=2, output_dim=1) x = np.random.random((128, 4)) y = np.random.random((128, 4)) batch_size = 32 epochs = 3 model.compile( optimizer=optimizers.SGD(learning_rate=0.001), loss=losses.MeanSquaredError(), metrics=[metrics.MeanSquaredError()], ) output_before_fit = model(x) model.fit( x, y, batch_size=batch_size, epochs=epochs, validation_split=0.2 ) output_after_fit = model(x) self.assertNotAllClose(output_before_fit, output_after_fit) def test_basic_fit_no_training(self): model = MyModel(hidden_dim=2, output_dim=1) x = np.random.random((128, 4)) model.predict(x) model(x)
BasicFlowTest
python
google__jax
jax/_src/pallas/mosaic_gpu/core.py
{ "start": 2623, "end": 5192 }
class ____(pallas_core.CompilerParams): """Mosaic GPU compiler parameters. Attributes: approx_math: If True, the compiler is allowed to use approximate implementations of some math operations, e.g. ``exp``. Defaults to False. dimension_semantics: A list of dimension semantics for each grid dimension of the kernel. Either "parallel" for dimensions that can execute in any order, or "sequential" for dimensions that must be executed sequentially. max_concurrent_steps: The maximum number of sequential stages that are active concurrently. Defaults to 1. delay_release: The number of steps to wait before reusing the input/output references. Defaults to 0, and must be strictly smaller than max_concurrent_steps. Generally, you'll want to set it to 1 if you don't await the WGMMA in the body. unsafe_no_auto_barriers: If True, Pallas will never automatically insert barrier instructions that ensure synchronous semantics of loads and stores. At the moment, the insertion is done conservatively and might regress performance. There are (at least) two conditions that must be satisfied for the use of this flag to be safe. First, no memory region is ever read *and* written to by the same thread (async copies are performed by background threads and do not count towards this rule). Secondly, no thread ever calls commit_smem(), reads from the committed SMEM and then issues an async copy overwriting that region (this is a very artificial and highly unlikely scenario). profile_space: The number of profiler events that can be collected in a single invocation. It is undefined behavior if a thread collects more events than this. profile_dir: The directory to which profiling traces will be written to. """ BACKEND: ClassVar[pallas_core.Backend] = "mosaic_gpu" approx_math: bool = False dimension_semantics: Sequence[DimensionSemantics] | None = None max_concurrent_steps: int = 1 unsafe_no_auto_barriers: bool = False profile_space: int = 0 profile_dir: str = "" lowering_semantics: mgpu.core.LoweringSemantics = mgpu.core.LoweringSemantics.Lane def __post_init__(self): if self.dimension_semantics is not None: object.__setattr__( self, "dimension_semantics", tuple(self.dimension_semantics) ) if bool(self.profile_space) ^ bool(self.profile_dir): raise ValueError( "Either both profile_space and profile_dir must be set, or neither." )
CompilerParams
python
anthropics__anthropic-sdk-python
src/anthropic/types/url_image_source_param.py
{ "start": 219, "end": 329 }
class ____(TypedDict, total=False): type: Required[Literal["url"]] url: Required[str]
URLImageSourceParam
python
django__django
django/contrib/redirects/apps.py
{ "start": 91, "end": 251 }
class ____(AppConfig): default_auto_field = "django.db.models.AutoField" name = "django.contrib.redirects" verbose_name = _("Redirects")
RedirectsConfig
python
eventlet__eventlet
eventlet/coros.py
{ "start": 39, "end": 2030 }
class ____: """This is sort of an inverse semaphore: a counter that starts at 0 and waits only if nonzero. It's used to implement a "wait for all" scenario. >>> from eventlet import coros, spawn_n >>> count = coros.metaphore() >>> count.wait() >>> def decrementer(count, id): ... print("{0} decrementing".format(id)) ... count.dec() ... >>> _ = spawn_n(decrementer, count, 'A') >>> _ = spawn_n(decrementer, count, 'B') >>> count.inc(2) >>> count.wait() A decrementing B decrementing """ def __init__(self): self.counter = 0 self.event = _event.Event() # send() right away, else we'd wait on the default 0 count! self.event.send() def inc(self, by=1): """Increment our counter. If this transitions the counter from zero to nonzero, make any subsequent :meth:`wait` call wait. """ assert by > 0 self.counter += by if self.counter == by: # If we just incremented self.counter by 'by', and the new count # equals 'by', then the old value of self.counter was 0. # Transitioning from 0 to a nonzero value means wait() must # actually wait. self.event.reset() def dec(self, by=1): """Decrement our counter. If this transitions the counter from nonzero to zero, a current or subsequent wait() call need no longer wait. """ assert by > 0 self.counter -= by if self.counter <= 0: # Don't leave self.counter < 0, that will screw things up in # future calls. self.counter = 0 # Transitioning from nonzero to 0 means wait() need no longer wait. self.event.send() def wait(self): """Suspend the caller only if our count is nonzero. In that case, resume the caller once the count decrements to zero again. """ self.event.wait()
metaphore
python
readthedocs__readthedocs.org
readthedocs/proxito/tests/storage.py
{ "start": 134, "end": 699 }
class ____(BuildMediaFileSystemStorage): """ Storage to use in El Proxito tests to have more control. Allow to specify when to return ``True`` or ``False`` depending if the file does exist or not in the storage backend. Mocking ``get_storage_class`` is not always an option, since there are other methods that should keep working normally (``.url()``) and not be mocked. """ _existing_files = [] def exists(self, path): if path in self._existing_files: return True return False
BuildMediaStorageTest
python
vyperlang__vyper
vyper/ast/nodes.py
{ "start": 46941, "end": 47396 }
class ____(Stmt): """ An `exports` declaration. Attributes ---------- annotation : Name | Attribute | Tuple List of exports """ __slots__ = ("annotation",) _only_empty_fields = ("value",) def validate(self): items = as_tuple(self.annotation) for item in items: if not isinstance(item, (Name, Attribute)): raise StructureException("invalid exports", item)
ExportsDecl
python
getsentry__sentry
src/sentry/models/repository.py
{ "start": 931, "end": 5866 }
class ____(Model): __relocation_scope__ = RelocationScope.Global organization_id = BoundedBigIntegerField(db_index=True) name = models.CharField(max_length=200) url = models.URLField(null=True) provider = models.CharField(max_length=64, null=True) # The external_id is the id of the repo in the provider's system. (e.g. GitHub's repo id) external_id = models.CharField(max_length=64, null=True) config = LegacyTextJSONField(default=dict) status = BoundedPositiveIntegerField( default=ObjectStatus.ACTIVE, choices=ObjectStatus.as_choices(), db_index=True ) date_added = models.DateTimeField(default=timezone.now) integration_id = BoundedPositiveIntegerField(db_index=True, null=True) languages = ArrayField(models.TextField(), default=list) class Meta: app_label = "sentry" db_table = "sentry_repository" unique_together = (("organization_id", "provider", "external_id"),) __repr__ = sane_repr("organization_id", "name", "provider") def has_integration_provider(self): return self.provider and self.provider.startswith("integrations:") def get_provider(self): from sentry.plugins.base import bindings if self.has_integration_provider(): provider_cls = bindings.get("integration-repository.provider").get(self.provider) return provider_cls(self.provider) provider_cls = bindings.get("repository.provider").get(self.provider) return provider_cls(self.provider) def generate_delete_fail_email(self, error_message): from sentry.utils.email import MessageBuilder new_context = { "repo": self, "error_message": error_message, "provider_name": self.get_provider().name, } return MessageBuilder( subject="Unable to Delete Repository Webhooks", context=new_context, template="sentry/emails/unable-to-delete-repo.txt", html_template="sentry/emails/unable-to-delete-repo.html", ) # pending deletion implementation _pending_fields = ("name", "external_id") def rename_on_pending_deletion(self) -> None: # Due to the fact that Repository is shown to the user # as it is pending deletion, this is added to display the fields # correctly to the user. self.config["pending_deletion_name"] = self.name rename_on_pending_deletion( self.organization_id, self, self._pending_fields, extra_fields_to_save=("config",) ) def reset_pending_deletion_field_names(self) -> bool: del self.config["pending_deletion_name"] return reset_pending_deletion_field_names( self.organization_id, self, self._pending_fields, extra_fields_to_save=("config",) ) def delete_pending_deletion_option(self) -> None: delete_pending_deletion_option(self.organization_id, self) @classmethod def sanitize_relocation_json( cls, json: Any, sanitizer: Sanitizer, model_name: NormalizedModelName | None = None ) -> None: model_name = get_model_name(cls) if model_name is None else model_name super().sanitize_relocation_json(json, sanitizer, model_name) sanitizer.set_json(json, SanitizableField(model_name, "config"), {}) sanitizer.set_string(json, SanitizableField(model_name, "external_id")) sanitizer.set_string(json, SanitizableField(model_name, "provider")) json["fields"]["languages"] = "[]" def on_delete(instance, actor: RpcUser | None = None, **kwargs): """ Remove webhooks for repository providers that use repository level webhooks. 
This is called from sentry.deletions.tasks.run_deletion() """ # If there is no provider, we don't have any webhooks, etc to delete if not instance.provider: return def handle_exception(e): from sentry.exceptions import InvalidIdentity, PluginError from sentry.shared_integrations.exceptions import IntegrationError if isinstance(e, (IntegrationError, PluginError, InvalidIdentity)): error = str(e) else: error = "An unknown error occurred" if actor is not None: msg = instance.generate_delete_fail_email(error) msg.send_async(to=[actor.email]) if instance.has_integration_provider(): try: instance.get_provider().on_delete_repository(repo=instance) except Exception as exc: handle_exception(exc) else: try: instance.get_provider().delete_repository(repo=instance, actor=actor) except Exception as exc: handle_exception(exc) pending_delete.connect(on_delete, sender=Repository, weak=False) pre_delete.connect( lambda instance, **k: instance.delete_pending_deletion_option(), sender=Repository, weak=False, )
Repository
python
scrapy__scrapy
tests/test_utils_request.py
{ "start": 8282, "end": 11507 }
class ____: def test_include_headers(self): class RequestFingerprinter: def fingerprint(self, request): return fingerprint(request, include_headers=["X-ID"]) settings = { "REQUEST_FINGERPRINTER_CLASS": RequestFingerprinter, } crawler = get_crawler(settings_dict=settings) r1 = Request("http://www.example.com", headers={"X-ID": "1"}) fp1 = crawler.request_fingerprinter.fingerprint(r1) r2 = Request("http://www.example.com", headers={"X-ID": "2"}) fp2 = crawler.request_fingerprinter.fingerprint(r2) assert fp1 != fp2 def test_dont_canonicalize(self): class RequestFingerprinter: cache = WeakKeyDictionary() def fingerprint(self, request): if request not in self.cache: fp = sha1() fp.update(to_bytes(request.url)) self.cache[request] = fp.digest() return self.cache[request] settings = { "REQUEST_FINGERPRINTER_CLASS": RequestFingerprinter, } crawler = get_crawler(settings_dict=settings) r1 = Request("http://www.example.com?a=1&a=2") fp1 = crawler.request_fingerprinter.fingerprint(r1) r2 = Request("http://www.example.com?a=2&a=1") fp2 = crawler.request_fingerprinter.fingerprint(r2) assert fp1 != fp2 def test_meta(self): class RequestFingerprinter: def fingerprint(self, request): if "fingerprint" in request.meta: return request.meta["fingerprint"] return fingerprint(request) settings = { "REQUEST_FINGERPRINTER_CLASS": RequestFingerprinter, } crawler = get_crawler(settings_dict=settings) r1 = Request("http://www.example.com") fp1 = crawler.request_fingerprinter.fingerprint(r1) r2 = Request("http://www.example.com", meta={"fingerprint": "a"}) fp2 = crawler.request_fingerprinter.fingerprint(r2) r3 = Request("http://www.example.com", meta={"fingerprint": "a"}) fp3 = crawler.request_fingerprinter.fingerprint(r3) r4 = Request("http://www.example.com", meta={"fingerprint": "b"}) fp4 = crawler.request_fingerprinter.fingerprint(r4) assert fp1 != fp2 assert fp1 != fp4 assert fp2 != fp4 assert fp2 == fp3 def test_from_crawler(self): class RequestFingerprinter: @classmethod def from_crawler(cls, crawler): return cls(crawler) def __init__(self, crawler): self._fingerprint = crawler.settings["FINGERPRINT"] def fingerprint(self, request): return self._fingerprint settings = { "REQUEST_FINGERPRINTER_CLASS": RequestFingerprinter, "FINGERPRINT": b"fingerprint", } crawler = get_crawler(settings_dict=settings) request = Request("http://www.example.com") fingerprint = crawler.request_fingerprinter.fingerprint(request) assert fingerprint == settings["FINGERPRINT"]
TestCustomRequestFingerprinter
python
cython__cython
Cython/Compiler/Scanning.py
{ "start": 5041, "end": 7675 }
class ____(SourceDescriptor): """ Represents a code source. A code source is a more generic abstraction for a "filename" (as sometimes the code doesn't come from a file). Instances of code sources are passed to Scanner.__init__ as the optional name argument and will be passed back when asking for the position()-tuple. """ def __init__(self, filename, path_description=None): filename = Utils.decode_filename(filename) self.filename = filename self.path_description = path_description or filename try: self._short_path_description = os.path.relpath(self.path_description) except ValueError: # path not under current directory => use complete file path self._short_path_description = self.path_description # Prefer relative paths to current directory (which is most likely the project root) over absolute paths. workdir = os.path.abspath('.') + os.sep self.file_path = filename[len(workdir):] if filename.startswith(workdir) else filename self.set_file_type_from_name(filename) self._cmp_name = filename self._lines = {} def get_lines(self, encoding=None, error_handling=None): # we cache the lines only the second time this is called, in # order to save memory when they are only used once key = (encoding, error_handling) lines = self._lines.get(key) if lines is not None: return lines with self.get_file_object(encoding=encoding, error_handling=error_handling) as f: lines = [line.rstrip() for line in f.readlines()] # Do not cache the first access, but add the key to remember that we already read it once. self._lines[key] = lines if key in self._lines else None return lines def get_file_object(self, encoding=None, error_handling=None): return Utils.open_source_file(self.filename, encoding, error_handling) def get_description(self): return self._short_path_description def get_error_description(self): path = self.filename cwd = Utils.decode_filename(os.getcwd() + os.path.sep) if path.startswith(cwd): return path[len(cwd):] return path def get_filenametable_entry(self): return self.file_path def __eq__(self, other): return isinstance(other, FileSourceDescriptor) and self.filename == other.filename def __hash__(self): return hash(self.filename) def __repr__(self): return "<FileSourceDescriptor:%s>" % self.filename
FileSourceDescriptor
python
chroma-core__chroma
chromadb/api/types.py
{ "start": 46383, "end": 50981 }
class ____(Protocol[D]): """ A protocol for sparse vector functions. To implement a new sparse vector function, you need to implement the following methods at minimum: - __call__ For future compatibility, it is strongly recommended to also implement: - __init__ - name - build_from_config - get_config """ @abstractmethod def __call__(self, input: D) -> SparseVectors: ... def embed_query(self, input: D) -> SparseVectors: """ Get the embeddings for a query input. This method is optional, and if not implemented, the default behavior is to call __call__. """ return self.__call__(input) def __init_subclass__(cls) -> None: super().__init_subclass__() # Raise an exception if __call__ is not defined since it is expected to be defined call = getattr(cls, "__call__") def __call__(self: SparseEmbeddingFunction[D], input: D) -> SparseVectors: result = call(self, input) assert result is not None return validate_sparse_vectors(cast(SparseVectors, result)) setattr(cls, "__call__", __call__) def embed_with_retries( self, input: D, **retry_kwargs: Dict[str, Any] ) -> SparseVectors: return cast(SparseVectors, retry(**retry_kwargs)(self.__call__)(input)) # type: ignore[call-overload] @abstractmethod def __init__(self, *args: Any, **kwargs: Any) -> None: """ Initialize the embedding function. Pass any arguments that will be needed to build the embedding function config. """ ... @staticmethod @abstractmethod def name() -> str: """ Return the name of the embedding function. """ ... @staticmethod @abstractmethod def build_from_config(config: Dict[str, Any]) -> "SparseEmbeddingFunction[D]": """ Build the embedding function from a config, which will be used to deserialize the embedding function. """ ... @abstractmethod def get_config(self) -> Dict[str, Any]: """ Return the config for the embedding function, which will be used to serialize the embedding function. """ ... def validate_config_update( self, old_config: Dict[str, Any], new_config: Dict[str, Any] ) -> None: """ Validate the update to the config. """ return @staticmethod def validate_config(config: Dict[str, Any]) -> None: """ Validate the config. """ return def validate_sparse_embedding_function( sparse_vector_function: SparseEmbeddingFunction[Embeddable], ) -> None: """Validate that a sparse vector function conforms to the SparseEmbeddingFunction protocol.""" function_signature = signature( sparse_vector_function.__class__.__call__ ).parameters.keys() protocol_signature = signature(SparseEmbeddingFunction.__call__).parameters.keys() if not function_signature == protocol_signature: raise ValueError( f"Expected SparseEmbeddingFunction.__call__ to have the following signature: {protocol_signature}, got {function_signature}\n" "Please see https://docs.trychroma.com/guides/embeddings for details of the SparseEmbeddingFunction interface.\n" ) # Index Configuration Types for Collection Schema def _create_extra_fields_validator(valid_fields: list[str]) -> Any: """Create a model validator that provides helpful error messages for invalid fields.""" @model_validator(mode="before") def validate_extra_fields(cls: Type[BaseModel], data: Any) -> Any: if isinstance(data, dict): invalid_fields = [k for k in data.keys() if k not in valid_fields] if invalid_fields: invalid_fields_str = ", ".join(f"'{f}'" for f in invalid_fields) class_name = cls.__name__ # Create a clear, actionable error message if len(invalid_fields) == 1: msg = ( f"'{invalid_fields[0]}' is not a valid field for {class_name}. " ) else: msg = f"Invalid fields for {class_name}: {invalid_fields_str}. 
" raise PydanticCustomError( "invalid_field", msg, {"invalid_fields": invalid_fields}, ) return data return validate_extra_fields
SparseEmbeddingFunction
python
redis__redis-py
redis/commands/search/field.py
{ "start": 3818, "end": 4473 }
class ____(Field): """ TagField is a tag-indexing field with simpler compression and tokenization. See http://redisearch.io/Tags/ """ SEPARATOR = "SEPARATOR" CASESENSITIVE = "CASESENSITIVE" def __init__( self, name: str, separator: str = ",", case_sensitive: bool = False, withsuffixtrie: bool = False, **kwargs, ): args = [Field.TAG, self.SEPARATOR, separator] if case_sensitive: args.append(self.CASESENSITIVE) if withsuffixtrie: args.append("WITHSUFFIXTRIE") Field.__init__(self, name, args=args, **kwargs)
TagField
python
Lightning-AI__lightning
src/lightning/pytorch/utilities/types.py
{ "start": 3214, "end": 3448 }
class ____(TypedDict, total=False): scheduler: Required[LRSchedulerTypeUnion] name: Optional[str] interval: str frequency: int reduce_on_plateau: bool monitor: Optional[str] strict: bool
LRSchedulerConfigType
python
astropy__astropy
astropy/nddata/nduncertainty.py
{ "start": 3015, "end": 19003 }
class ____(metaclass=ABCMeta): """This is the metaclass for uncertainty classes used with `NDData`. Parameters ---------- array : any type, optional The array or value (the parameter name is due to historical reasons) of the uncertainty. `numpy.ndarray`, `~astropy.units.Quantity` or `NDUncertainty` subclasses are recommended. If the `array` is `list`-like or `numpy.ndarray`-like it will be cast to a plain `numpy.ndarray`. Default is ``None``. unit : unit-like, optional Unit for the uncertainty ``array``. Strings that can be converted to a `~astropy.units.Unit` are allowed. Default is ``None``. copy : `bool`, optional Indicates whether to save the `array` as a copy. ``True`` copies it before saving, while ``False`` tries to save every parameter as reference. Note however that it is not always possible to save the input as reference. Default is ``True``. Raises ------ IncompatibleUncertaintiesException If given another `NDUncertainty`-like class as ``array`` if their ``uncertainty_type`` is different. """ def __init__(self, array=None, copy=True, unit=None): if isinstance(array, NDUncertainty): # Given an NDUncertainty class or subclass check that the type # is the same. if array.uncertainty_type != self.uncertainty_type: raise IncompatibleUncertaintiesException # Check if two units are given and take the explicit one then. if unit is not None and unit != array._unit: # TODO : Clarify it (see NDData.init for same problem)? log.info("overwriting Uncertainty's current unit with specified unit.") elif array._unit is not None: unit = array.unit array = array.array elif isinstance(array, Quantity): # Check if two units are given and take the explicit one then. if unit is not None and array.unit is not None and unit != array.unit: log.info("overwriting Quantity's current unit with specified unit.") elif array.unit is not None: unit = array.unit array = array.value if unit is None: self._unit = None else: self._unit = Unit(unit) self.array = deepcopy(array) if copy else array self.parent_nddata = None # no associated NDData - until it is set! @property @abstractmethod def uncertainty_type(self): """`str` : Short description of the type of uncertainty. Defined as abstract property so subclasses *have* to override this. """ return None @property def supports_correlated(self): """`bool` : Supports uncertainty propagation with correlated uncertainties? .. versionadded:: 1.2 """ return False @property def array(self): """`numpy.ndarray` : the uncertainty's value.""" return self._array @array.setter def array(self, value): if isinstance(value, (list, np.ndarray)): value = np.asarray(value) self._array = value @property def unit(self): """`~astropy.units.Unit` : The unit of the uncertainty, if any.""" return self._unit @unit.setter def unit(self, value): """ The unit should be set to a value consistent with the parent NDData unit and the uncertainty type. """ if value is not None: # Check the hidden attribute below, not the property. The property # raises an exception if there is no parent_nddata. if self._parent_nddata is not None: parent_unit = self.parent_nddata.unit try: # Check for consistency with the unit of the parent_nddata self._data_unit_to_uncertainty_unit(parent_unit).to(value) except UnitConversionError: raise UnitConversionError( f"Unit {value} is incompatible with unit {parent_unit} of " "parent nddata" ) self._unit = Unit(value) else: self._unit = value @property def quantity(self): """ This uncertainty as an `~astropy.units.Quantity` object. 
""" return Quantity(self.array, self.unit, copy=False, dtype=self.array.dtype) @property def parent_nddata(self): """`NDData` : reference to `NDData` instance with this uncertainty. In case the reference is not set uncertainty propagation will not be possible since propagation might need the uncertain data besides the uncertainty. """ no_parent_message = "uncertainty is not associated with an NDData object" parent_lost_message = ( "the associated NDData object was deleted and cannot be accessed " "anymore. You can prevent the NDData object from being deleted by " "assigning it to a variable. If this happened after unpickling " "make sure you pickle the parent not the uncertainty directly." ) try: parent = self._parent_nddata except AttributeError: raise MissingDataAssociationException(no_parent_message) else: if parent is None: raise MissingDataAssociationException(no_parent_message) else: # The NDData is saved as weak reference so we must call it # to get the object the reference points to. However because # we have a weak reference here it's possible that the parent # was deleted because its reference count dropped to zero. if isinstance(self._parent_nddata, weakref.ref): resolved_parent = self._parent_nddata() if resolved_parent is None: log.info(parent_lost_message) return resolved_parent else: log.info("parent_nddata should be a weakref to an NDData object.") return self._parent_nddata @parent_nddata.setter def parent_nddata(self, value): if value is not None and not isinstance(value, weakref.ref): # Save a weak reference on the uncertainty that points to this # instance of NDData. Direct references should NOT be used: # https://github.com/astropy/astropy/pull/4799#discussion_r61236832 value = weakref.ref(value) # Set _parent_nddata here and access below with the property because value # is a weakref self._parent_nddata = value # set uncertainty unit to that of the parent if it was not already set, unless initializing # with empty parent (Value=None) if value is not None: parent_unit = self.parent_nddata.unit # this will get the unit for masked quantity input: parent_data_unit = getattr(self.parent_nddata.data, "unit", None) if parent_unit is None and parent_data_unit is None: self.unit = None elif self.unit is None and parent_unit is not None: # Set the uncertainty's unit to the appropriate value self.unit = self._data_unit_to_uncertainty_unit(parent_unit) elif parent_data_unit is not None: # if the parent_nddata object has a unit, use it: self.unit = self._data_unit_to_uncertainty_unit(parent_data_unit) else: # Check that units of uncertainty are compatible with those of # the parent. If they are, no need to change units of the # uncertainty or the data. If they are not, let the user know. unit_from_data = self._data_unit_to_uncertainty_unit(parent_unit) try: unit_from_data.to(self.unit) except UnitConversionError: raise UnitConversionError( f"Unit {self.unit} of uncertainty " f"incompatible with unit {parent_unit} of " "data" ) @abstractmethod def _data_unit_to_uncertainty_unit(self, value): """ Subclasses must override this property. It should take in a data unit and return the correct unit for the uncertainty given the uncertainty type. 
""" return None def __repr__(self): prefix = self.__class__.__name__ + "(" try: body = np.array2string(self.array, separator=", ", prefix=prefix) except AttributeError: # In case it wasn't possible to use array2string body = str(self.array) return f"{prefix}{body})" def __getstate__(self): # Because of the weak reference the class wouldn't be picklable. try: return self._array, self._unit, self.parent_nddata except MissingDataAssociationException: # In case there's no parent return self._array, self._unit, None def __setstate__(self, state): if len(state) != 3: raise TypeError("The state should contain 3 items.") self._array = state[0] self._unit = state[1] parent = state[2] if parent is not None: parent = weakref.ref(parent) self._parent_nddata = parent def __getitem__(self, item): """Normal slicing on the array, keep the unit and return a reference.""" return self.__class__(self.array[item], unit=self.unit, copy=False) def propagate(self, operation, other_nddata, result_data, correlation, axis=None): """Calculate the resulting uncertainty given an operation on the data. .. versionadded:: 1.2 Parameters ---------- operation : callable The operation that is performed on the `NDData`. Supported are `numpy.add`, `numpy.subtract`, `numpy.multiply` and `numpy.true_divide` (or `numpy.divide`). other_nddata : `NDData` instance The second operand in the arithmetic operation. result_data : `~astropy.units.Quantity` or ndarray The result of the arithmetic operations on the data. correlation : `numpy.ndarray` or number The correlation (rho) is defined between the uncertainties in sigma_AB = sigma_A * sigma_B * rho. A value of ``0`` means uncorrelated operands. axis : int or tuple of ints, optional Axis over which to perform a collapsing operation. Returns ------- resulting_uncertainty : `NDUncertainty` instance Another instance of the same `NDUncertainty` subclass containing the uncertainty of the result. Raises ------ ValueError If the ``operation`` is not supported or if correlation is not zero but the subclass does not support correlated uncertainties. Notes ----- First this method checks if a correlation is given and the subclass implements propagation with correlated uncertainties. Then the second uncertainty is converted (or an Exception is raised) to the same class in order to do the propagation. Then the appropriate propagation method is invoked and the result is returned. """ # Check if the subclass supports correlation if not self.supports_correlated: if isinstance(correlation, np.ndarray) or correlation != 0: raise ValueError( f"{type(self).__name__} does not support uncertainty propagation" " with correlation." 
) if other_nddata is not None: # Get the other uncertainty (and convert it to a matching one) other_uncert = self._convert_uncertainty(other_nddata.uncertainty) if operation.__name__ == "add": result = self._propagate_add(other_uncert, result_data, correlation) elif operation.__name__ == "subtract": result = self._propagate_subtract( other_uncert, result_data, correlation ) elif operation.__name__ == "multiply": result = self._propagate_multiply( other_uncert, result_data, correlation ) elif operation.__name__ in ["true_divide", "divide"]: result = self._propagate_divide(other_uncert, result_data, correlation) else: raise ValueError(f"unsupported operation: {operation.__name__}") else: # assume this is a collapsing operation: result = self._propagate_collapse(operation, axis) return self.__class__(array=result, copy=False) def _convert_uncertainty(self, other_uncert): """Checks if the uncertainties are compatible for propagation. Checks if the other uncertainty is `NDUncertainty`-like and if so verify that the uncertainty_type is equal. If the latter is not the case try returning ``self.__class__(other_uncert)``. Parameters ---------- other_uncert : `NDUncertainty` subclass The other uncertainty. Returns ------- other_uncert : `NDUncertainty` subclass but converted to a compatible `NDUncertainty` subclass if possible and necessary. Raises ------ IncompatibleUncertaintiesException: If the other uncertainty cannot be converted to a compatible `NDUncertainty` subclass. """ if isinstance(other_uncert, NDUncertainty): if self.uncertainty_type == other_uncert.uncertainty_type: return other_uncert else: return self.__class__(other_uncert) else: raise IncompatibleUncertaintiesException @abstractmethod def _propagate_add(self, other_uncert, result_data, correlation): return None @abstractmethod def _propagate_subtract(self, other_uncert, result_data, correlation): return None @abstractmethod def _propagate_multiply(self, other_uncert, result_data, correlation): return None @abstractmethod def _propagate_divide(self, other_uncert, result_data, correlation): return None def represent_as(self, other_uncert): """Convert this uncertainty to a different uncertainty type. Parameters ---------- other_uncert : `NDUncertainty` subclass The `NDUncertainty` subclass to convert to. Returns ------- resulting_uncertainty : `NDUncertainty` instance An instance of ``other_uncert`` subclass containing the uncertainty converted to the new uncertainty type. Raises ------ TypeError If either the initial or final subclasses do not support conversion, a `TypeError` is raised. """ as_variance = getattr(self, "_convert_to_variance", None) if as_variance is None: raise TypeError( f"{type(self)} does not support conversion to another uncertainty type." ) from_variance = getattr(other_uncert, "_convert_from_variance", None) if from_variance is None: raise TypeError( f"{other_uncert.__name__} does not support conversion from " "another uncertainty type." ) return from_variance(as_variance())
NDUncertainty
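A brief usage sketch for the abstract uncertainty base above, assuming the concrete StdDevUncertainty subclass; it exercises the weak parent reference and unit inheritance implemented in the parent_nddata setter.

```python
import numpy as np
from astropy.nddata import NDData, StdDevUncertainty

ndd = NDData(np.array([1.0, 2.0, 3.0]), unit="adu")
uncert = StdDevUncertainty(np.array([0.1, 0.2, 0.3]))
ndd.uncertainty = uncert             # NDData stores a weakref on the uncertainty
assert uncert.parent_nddata is ndd   # resolved through the weak reference
print(uncert.unit)                   # inherited from the parent data: adu
```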
python
modin-project__modin
modin/tests/pandas/extensions/test_base_extensions.py
{ "start": 5672, "end": 11390 }
class ____: def test_override_loc_for_one_backend(self, Backend1, data_class): modin_object = data_class([1, 2, 3]) @register_base_accessor(name="loc", backend=Backend1) @property def my_loc(self): return self.index[0] assert isinstance(modin_object.set_backend(Backend1).loc, int) assert modin_object.set_backend(Backend1).loc == 0 @pytest.mark.parametrize("backend", ["pandas", "python_test"]) def test_override_loc_for_all_backends(self, backend, data_class): @register_base_accessor(name="loc", backend=None) @property def my_loc(self): return self.index[0] modin_object = data_class([1, 2, 3]) assert isinstance(modin_object.set_backend(backend).loc, int) assert modin_object.set_backend(backend).loc == 0 def test_add_deletable_property(self, Backend1, data_class): # register a public property `public_property_name` that is backed by # a private attribute `private_property_name`. public_property_name = "property_name" private_property_name = "_property_name" def get_property(self): return getattr(self, private_property_name) def set_property(self, value): setattr(self, private_property_name, value) def del_property(self): delattr(self, private_property_name) register_base_accessor(name=public_property_name, backend=Backend1)( property(fget=get_property, fset=set_property, fdel=del_property) ) modin_object = data_class({"a": [1, 2, 3], "b": [4, 5, 6]}) assert not hasattr(modin_object, public_property_name) backend_object = modin_object.set_backend(Backend1) setattr(backend_object, public_property_name, "value") assert getattr(backend_object, public_property_name) == "value" delattr(backend_object, public_property_name) # check that the deletion works. assert not hasattr(backend_object, private_property_name) @pytest.mark.parametrize("backend", ["pandas", "python_test"]) def test_add_deletable_property_for_all_backends(self, data_class, backend): # register a public property `public_property_name` that is backed by # a private attribute `private_property_name`. public_property_name = "property_name" private_property_name = "_property_name" def get_property(self): return getattr(self, private_property_name) def set_property(self, value): setattr(self, private_property_name, value) def del_property(self): delattr(self, private_property_name) register_base_accessor(name=public_property_name)( property(fget=get_property, fset=set_property, fdel=del_property) ) modin_object = data_class({"a": [1, 2, 3], "b": [4, 5, 6]}).set_backend(backend) setattr(modin_object, public_property_name, "value") assert getattr(modin_object, public_property_name) == "value" delattr(modin_object, public_property_name) # check that the deletion works. 
assert not hasattr(modin_object, private_property_name) def test_get_property_that_raises_attribute_error_on_get_modin_issue_7562( self, data_class ): def get_property(self): raise AttributeError register_base_accessor(name="extension_property")(property(fget=get_property)) modin_object = data_class() with pytest.raises(AttributeError): getattr(modin_object, "extension_property") def test_non_settable_extension_property(self, Backend1, data_class): modin_object = data_class([0]) property_name = "property_name" register_base_accessor(name=property_name, backend=Backend1)( property(fget=(lambda self: 4)) ) assert not hasattr(modin_object, property_name) backend_object = modin_object.set_backend(Backend1) assert getattr(backend_object, property_name) == 4 with pytest.raises(AttributeError): setattr(backend_object, property_name, "value") def test_delete_non_deletable_extension_property(self, Backend1, data_class): modin_object = data_class([0]) property_name = "property_name" register_base_accessor(name=property_name, backend=Backend1)( property(fget=(lambda self: "value")) ) assert not hasattr(modin_object, property_name) backend_object = modin_object.set_backend(Backend1) assert hasattr(backend_object, property_name) with pytest.raises(AttributeError): delattr(backend_object, property_name) @pytest.mark.parametrize("data_class", [pd.DataFrame, pd.Series]) def test_deleting_extension_that_is_not_property_raises_attribute_error( Backend1, data_class ): expected_string_val = "Some string value" method_name = "new_method" @register_base_accessor(name=method_name, backend=Backend1) def my_method_implementation(self): return expected_string_val modin_object = data_class([0]).set_backend(Backend1) assert hasattr(data_class, method_name) with pytest.raises(AttributeError): delattr(modin_object, method_name) def test_disallowed_extensions(Backend1, non_extendable_attribute_name): with pytest.raises( ValueError, match=re.escape( f"Cannot register an extension with the reserved name {non_extendable_attribute_name}." ), ): register_base_accessor(name=non_extendable_attribute_name, backend=Backend1)( "unused_value" )
TestProperty
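A hedged sketch of the accessor-registration pattern these tests exercise; the import path modin.pandas.api.extensions is assumed from how the tests call register_base_accessor.

```python
import modin.pandas as pd
from modin.pandas.api.extensions import register_base_accessor

@register_base_accessor(name="first_index")
@property
def first_index(self):
    # exposed on both DataFrame and Series, for every backend
    return self.index[0]

df = pd.DataFrame({"a": [1, 2, 3]})
print(df.first_index)  # 0
```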
python
joke2k__faker
faker/providers/internet/id_ID/__init__.py
{ "start": 46, "end": 562 }
class ____(InternetProvider): tlds = ( # From https://en.wikipedia.org/wiki/List_of_Internet_top-level_domains "com", "org", "net", "int", "edu", "gov", "mil", # From https://id.wikipedia.org/wiki/.id "id", "ac.id", "biz.id", "co.id", "desa.id", "go.id", "mil.id", "my.id", "net.id", "or.id", "ponpes.id", "sch.id", "web.id", )
Provider
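An illustrative (non-deterministic) use of this locale provider; the tlds tuple above is what the generic InternetProvider methods sample from.

```python
from faker import Faker

fake = Faker("id_ID")
print(fake.tld())          # e.g. "co.id" or "com", drawn from the tlds tuple
print(fake.domain_name())  # e.g. "something.web.id" (output varies per seed)
```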
python
django__django
tests/admin_views/models.py
{ "start": 23535, "end": 23845 }
class ____(models.Model): """ Issue #20522 Model where the validation of child foreign-key relationships depends on validation of the parent """ some_required_info = models.PositiveIntegerField() family_name = models.CharField(max_length=255, blank=False)
ParentWithDependentChildren
python
bokeh__bokeh
tests/unit/bokeh/core/property/test_container.py
{ "start": 4402, "end": 5915 }
class ____: def test_init(self) -> None: with pytest.raises(TypeError): bcpc.Dict() def test_valid(self) -> None: prop = bcpc.Dict(String, bcpc.List(Int)) assert prop.is_valid({}) assert prop.is_valid({"foo": [1,2,3]}) def test_invalid(self) -> None: prop = bcpc.Dict(String, bcpc.List(Int)) assert not prop.is_valid(None) assert not prop.is_valid(False) assert not prop.is_valid(True) assert not prop.is_valid(0) assert not prop.is_valid(1) assert not prop.is_valid(0.0) assert not prop.is_valid(1.0) assert not prop.is_valid(1.0+1.0j) assert not prop.is_valid("") assert not prop.is_valid(()) assert not prop.is_valid([]) assert not prop.is_valid({"foo": [1,2,3.5]}) assert not prop.is_valid(np.array([1,2,3])) assert not prop.is_valid(_TestHasProps()) assert not prop.is_valid(_TestModel()) def test_has_ref(self) -> None: prop = bcpc.Dict(String, Int) assert not prop.has_ref prop = bcpc.Dict(String, Instance(_TestModel)) assert prop.has_ref def test_str(self) -> None: prop = bcpc.Dict(String, Int) assert str(prop) == "Dict(String, Int)" def test_wrap(self) -> None: prop = bcpc.Dict(String, Int) wrapped = prop.wrap({"foo": 10}) assert isinstance(wrapped, PropertyValueDict) assert prop.wrap(wrapped) is wrapped
Test_Dict
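A standalone check mirroring the expectations in the tests above; the import paths are assumed (bcpc aliases bokeh.core.property.container in the test module).

```python
from bokeh.core.property.container import Dict, List
from bokeh.core.property.primitive import Int, String

prop = Dict(String, List(Int))
assert prop.is_valid({"foo": [1, 2, 3]})
assert not prop.is_valid({"foo": [1, 2, 3.5]})   # 3.5 is not an Int
print(str(prop))                                 # expected: Dict(String, List(Int))
```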
python
cython__cython
Cython/Compiler/ExprNodes.py
{ "start": 422453, "end": 423293 }
class ____(ExprNode): # Initialize CyFunction.func_classobj is_temp = True type = py_object_type subexprs = [] is_active = False def analyse_expressions(self, env): return self def generate_result_code(self, code): assert self.is_active code.putln( '%s = PyList_New(0); %s' % ( self.result(), code.error_goto_if_null(self.result(), self.pos))) self.generate_gotref(code) def generate_injection_code(self, code, classobj_cname): assert self.is_active code.globalstate.use_utility_code( UtilityCode.load_cached("CyFunctionClassCell", "CythonFunction.c")) code.put_error_if_neg(self.pos, '__Pyx_CyFunction_InitClassCell(%s, %s)' % ( self.result(), classobj_cname))
ClassCellInjectorNode
python
conda__conda
conda/env/env.py
{ "start": 14324, "end": 15361 }
class ____(EnvironmentYaml): """A class representing an ``environment.yaml`` file""" @deprecated("25.9", "26.3") def get_filename(filename): """Expand filename if local path or return the ``url``""" url_scheme = filename.split("://", 1)[0] if url_scheme in CONDA_SESSION_SCHEMES: return filename else: return expand(filename) def print_result(args, prefix, result): """Print the result of an install operation""" if context.json: if result["conda"] is None and result["pip"] is None: common.stdout_json_success( message="All requested packages already installed." ) else: if result["conda"] is not None: actions = result["conda"] else: actions = {} if result["pip"] is not None: actions["PIP"] = result["pip"] common.stdout_json_success(prefix=prefix, actions=actions) else: common.print_activate(args.name or prefix)
Environment
python
pyca__cryptography
src/cryptography/utils.py
{ "start": 2189, "end": 4128 }
class ____(types.ModuleType): def __init__(self, module: types.ModuleType): super().__init__(module.__name__) self.__dict__["_module"] = module def __getattr__(self, attr: str) -> object: obj = getattr(self._module, attr) if isinstance(obj, _DeprecatedValue): warnings.warn(obj.message, obj.warning_class, stacklevel=2) obj = obj.value return obj def __setattr__(self, attr: str, value: object) -> None: setattr(self._module, attr, value) def __delattr__(self, attr: str) -> None: obj = getattr(self._module, attr) if isinstance(obj, _DeprecatedValue): warnings.warn(obj.message, obj.warning_class, stacklevel=2) delattr(self._module, attr) def __dir__(self) -> Sequence[str]: return ["_module", *dir(self._module)] def deprecated( value: object, module_name: str, message: str, warning_class: type[Warning], name: str | None = None, ) -> _DeprecatedValue: module = sys.modules[module_name] if not isinstance(module, _ModuleWithDeprecations): sys.modules[module_name] = module = _ModuleWithDeprecations(module) dv = _DeprecatedValue(value, message, warning_class) # Maintain backwards compatibility with `name is None` for pyOpenSSL. if name is not None: setattr(module, name, dv) return dv def cached_property(func: Callable) -> property: cached_name = f"_cached_{func}" sentinel = object() def inner(instance: object): cache = getattr(instance, cached_name, sentinel) if cache is not sentinel: return cache result = func(instance) setattr(instance, cached_name, result) return result return property(inner) # Python 3.10 changed representation of enums. We use well-defined object # representation and string representation from Python 3.9.
_ModuleWithDeprecations
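A hedged sketch of how the deprecated() helper above is typically called from a module's top level; OLD_CONSTANT and the message are illustrative, not taken from cryptography itself.

```python
from cryptography import utils

OLD_CONSTANT = utils.deprecated(
    42,
    __name__,
    "OLD_CONSTANT is deprecated; use NEW_CONSTANT instead.",
    DeprecationWarning,
    name="OLD_CONSTANT",
)
# The current module in sys.modules is now wrapped by _ModuleWithDeprecations,
# so attribute access to OLD_CONSTANT warns and then returns the value (42).
```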
python
huggingface__transformers
src/transformers/models/clip/modeling_clip.py
{ "start": 29818, "end": 37207 }
class ____(CLIPPreTrainedModel): config: CLIPConfig _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer", "CLIPVisionEmbeddings"] def __init__(self, config: CLIPConfig): super().__init__(config) if not isinstance(config.text_config, CLIPTextConfig): raise TypeError( "config.text_config is expected to be of type CLIPTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, CLIPVisionConfig): raise TypeError( "config.vision_config is expected to be of type CLIPVisionConfig but is of type" f" {type(config.vision_config)}." ) text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size text_model = CLIPTextModel._from_config(text_config) self.text_model = text_model.text_model vision_model = CLIPVisionModel._from_config(vision_config) self.vision_model = vision_model.vision_model self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) # Initialize weights and apply final processing self.post_init() @filter_out_non_signature_kwargs() @auto_docstring def get_text_features( self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPTextModel`]. Examples: ```python >>> import torch >>> from transformers import AutoTokenizer, CLIPModel >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> with torch.inference_mode(): ... text_features = model.get_text_features(**inputs) ```""" text_outputs: BaseModelOutputWithPooling = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, ) pooled_output = text_outputs.pooler_output text_features = self.text_projection(pooled_output) return text_features @filter_out_non_signature_kwargs() @auto_docstring def get_image_features( self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False, ) -> torch.FloatTensor: r""" Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`CLIPVisionModel`]. Examples: ```python >>> import torch >>> from transformers import AutoProcessor, CLIPModel >>> from transformers.image_utils import load_image >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = load_image(url) >>> inputs = processor(images=image, return_tensors="pt") >>> with torch.inference_mode(): ... 
image_features = model.get_image_features(**inputs) ```""" vision_outputs: BaseModelOutputWithPooling = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, ) pooled_output = vision_outputs.pooler_output image_features = self.visual_projection(pooled_output) return image_features @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, interpolate_pos_encoding: bool = False, **kwargs: Unpack[TransformersKwargs], ) -> CLIPOutput: r""" return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. Examples: ```python >>> import torch >>> from transformers import AutoProcessor, CLIPModel >>> from transformers.image_utils import load_image >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = load_image(url) >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True ... ) >>> with torch.inference_mode(): ... outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" vision_outputs: BaseModelOutputWithPooling = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, **kwargs, ) text_outputs: BaseModelOutputWithPooling = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, **kwargs, ) image_embeds = vision_outputs.pooler_output image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs.pooler_output text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / _get_vector_norm(image_embeds) text_embeds = text_embeds / _get_vector_norm(text_embeds) # cosine similarity as logits logits_per_text = torch.matmul(text_embeds, image_embeds.t().to(text_embeds.device)) logits_per_text = logits_per_text * self.logit_scale.exp().to(text_embeds.device) logits_per_image = logits_per_text.t() loss = None if return_loss: loss = clip_loss(logits_per_text) return CLIPOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, ) @auto_docstring
CLIPModel
python
eth-brownie__brownie
brownie/test/strategies.py
{ "start": 1559, "end": 7990 }
class ____(DeferredStrategy): def __init__(self, fn: Callable, repr_target: str) -> None: super().__init__(fn) self._repr_target = repr_target def __repr__(self): return f"sampled_from({self._repr_target})" def _exclude_filter(fn: Callable) -> Callable: def wrapper(*args: Tuple, exclude: Any = None, **kwargs: int) -> SearchStrategy: strat = fn(*args, **kwargs) if exclude is None: return strat if callable(exclude): return strat.filter(exclude) if not isinstance(exclude, Iterable) or isinstance(exclude, str): exclude = (exclude,) strat = strat.filter(lambda k: k not in exclude) # make the filter repr more readable repr_ = strat.__repr__().rsplit(").filter", maxsplit=1)[0] strat._LazyStrategy__representation = f"{repr_}, exclude={exclude})" return strat return wrapper def _check_numeric_bounds( type_str: str, min_value: NumberType, max_value: NumberType, num_class: type ) -> Tuple: lower, upper = get_int_bounds(type_str) min_final = lower if min_value is None else num_class(min_value) max_final = upper if max_value is None else num_class(max_value) if min_final < lower: raise ValueError(f"min_value '{min_value}' is outside allowable range for {type_str}") if max_final > upper: raise ValueError(f"max_value '{max_value}' is outside allowable range for {type_str}") if min_final > max_final: raise ValueError(f"min_value '{min_final}' is greater than max_value '{max_final}'") return min_final, max_final @_exclude_filter def _integer_strategy( type_str: str, min_value: Optional[int] = None, max_value: Optional[int] = None ) -> SearchStrategy: min_value, max_value = _check_numeric_bounds(type_str, min_value, max_value, Wei) return st.integers(min_value=min_value, max_value=max_value) @_exclude_filter def _decimal_strategy( min_value: NumberType = None, max_value: NumberType = None, places: int = 10 ) -> SearchStrategy: min_value, max_value = _check_numeric_bounds("int128", min_value, max_value, Fixed) return st.decimals(min_value=min_value, max_value=max_value, places=places) @_exclude_filter def _address_strategy(length: Optional[int] = None, include: list = []) -> SearchStrategy: return _DeferredStrategyRepr( lambda: st.sampled_from(list(network.accounts)[:length] + include), "accounts" ) @_exclude_filter def _bytes_strategy( abi_type: BasicType, min_size: Optional[int] = None, max_size: Optional[int] = None ) -> SearchStrategy: size = abi_type.sub if not size: return st.binary(min_size=min_size or 1, max_size=max_size or 64) if size < 1 or size > 32: raise ValueError(f"Invalid type: {abi_type.to_type_str()}") if min_size is not None or max_size is not None: raise TypeError("Cannot specify size for fixed length bytes strategy") return st.binary(min_size=size, max_size=size) @_exclude_filter def _string_strategy(min_size: int = 0, max_size: int = 64) -> SearchStrategy: return st.text(min_size=min_size, max_size=max_size) def _get_array_length(var_str: str, length: ArrayLengthType, dynamic_len: int) -> int: if not isinstance(length, (list, int)): raise TypeError(f"{var_str} must be of type int or list, not '{type(length).__name__}''") if not isinstance(length, list): return length if len(length) != dynamic_len: raise ValueError( f"Length of '{var_str}' must equal the number of dynamic " f"dimensions for the given array ({dynamic_len})" ) return length.pop() def _array_strategy( abi_type: BasicType, min_length: ArrayLengthType = 1, max_length: ArrayLengthType = 8, unique: bool = False, **kwargs: Any, ) -> SearchStrategy: if abi_type.arrlist[-1]: min_len = max_len = abi_type.arrlist[-1][0] else: 
dynamic_len = len([i for i in abi_type.arrlist if not i]) min_len = _get_array_length("min_length", min_length, dynamic_len) max_len = _get_array_length("max_length", max_length, dynamic_len) if abi_type.item_type.is_array: kwargs.update(min_length=min_length, max_length=max_length, unique=unique) base_strategy = strategy(abi_type.item_type.to_type_str(), **kwargs) strat = st.lists(base_strategy, min_size=min_len, max_size=max_len, unique=unique) # swap 'size' for 'length' in the repr repr_ = "length".join(strat.__repr__().rsplit("size", maxsplit=2)) strat._LazyStrategy__representation = repr_ return strat def _tuple_strategy(abi_type: TupleType) -> SearchStrategy: strategies = [strategy(i.to_type_str()) for i in abi_type.components] return st.tuples(*strategies) def contract_strategy(contract_name: str) -> SearchStrategy: def _contract_deferred(name): for proj in project.get_loaded_projects(): if name in proj.dict(): return st.sampled_from(list(proj[name])) raise NameError(f"Contract '{name}' does not exist in any active projects") return _DeferredStrategyRepr(lambda: _contract_deferred(contract_name), contract_name) @overload def strategy( type_str: Literal["address"], length: Optional[int] = None, include: list = [], ) -> SearchStrategy: ... @overload def strategy( type_str: Union[EvmIntType, EvmUintType], min_value: Optional[int] = None, max_value: Optional[int] = None, ) -> SearchStrategy: ... def strategy(type_str: str, **kwargs: Any) -> SearchStrategy: type_str = TYPE_STR_TRANSLATIONS.get(type_str, type_str) if type_str == "fixed168x10": return _decimal_strategy(**kwargs) if type_str == "address": return _address_strategy(**kwargs) if type_str == "bool": return st.booleans(**kwargs) if type_str == "string": return _string_strategy(**kwargs) abi_type = parse(type_str) if abi_type.is_array: return _array_strategy(abi_type, **kwargs) if isinstance(abi_type, TupleType): return _tuple_strategy(abi_type, **kwargs) base = abi_type.base if base in ("int", "uint"): return _integer_strategy(type_str, **kwargs) if base == "bytes": return _bytes_strategy(abi_type, **kwargs) raise ValueError(f"No strategy available for type: {type_str}")
_DeferredStrategyRepr
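A hedged sketch of the strategy() entry point defined above, as it might appear in a brownie property-based test; the type strings and bounds are arbitrary examples.

```python
from brownie.test import strategy

amount = strategy("uint256", max_value=10**18)  # bounded integer strategy
owner = strategy("address")                     # deferred sample from accounts
values = strategy("uint8[4]", unique=True)      # fixed-length array strategy
```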
python
aio-libs__aiohttp
aiohttp/multipart.py
{ "start": 5683, "end": 6888 }
class ____: """Wrapper around the MultipartReader. It takes care about underlying connection and close it when it needs in. """ def __init__( self, resp: "ClientResponse", stream: "MultipartReader", ) -> None: self.resp = resp self.stream = stream def __aiter__(self) -> "MultipartResponseWrapper": return self async def __anext__( self, ) -> Union["MultipartReader", "BodyPartReader"]: part = await self.next() if part is None: raise StopAsyncIteration return part def at_eof(self) -> bool: """Returns True when all response data had been read.""" return self.resp.content.at_eof() async def next( self, ) -> Union["MultipartReader", "BodyPartReader"] | None: """Emits next multipart reader object.""" item = await self.stream.next() if self.stream.at_eof(): await self.release() return item async def release(self) -> None: """Release the connection gracefully. All remaining content is read to the void. """ self.resp.release()
MultipartResponseWrapper
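A hedged sketch of consuming the wrapper through the async-iteration protocol shown above; the URL is a placeholder and nested multipart bodies are not handled.

```python
import aiohttp

async def read_parts(url: str) -> None:
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            wrapper = aiohttp.MultipartReader.from_response(resp)
            async for part in wrapper:          # MultipartResponseWrapper.__anext__
                body = await part.read()        # assumes flat BodyPartReader parts
                print(part.headers.get("Content-Type"), len(body))
```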
python
django__django
tests/generic_views/views.py
{ "start": 5759, "end": 5832 }
class ____(BookConfig, generic.TodayArchiveView): pass
BookTodayArchive
python
facebook__pyre-check
client/commands/expression_level_coverage.py
{ "start": 1061, "end": 1175 }
class ____(json_mixins.CamlCaseAndExcludeJsonMixin): start: Pair stop: Pair @dataclass(frozen=True)
Location
python
astropy__astropy
astropy/io/fits/hdu/hdulist.py
{ "start": 8663, "end": 60916 }
class ____(list, _Verify): """ HDU list class. This is the top-level FITS object. When a FITS file is opened, a `HDUList` object is returned. """ def __init__(self, hdus=[], file=None): """ Construct a `HDUList` object. Parameters ---------- hdus : BaseHDU or sequence thereof, optional The HDU object(s) to comprise the `HDUList`. Should be instances of HDU classes like `ImageHDU` or `BinTableHDU`. file : file-like, bytes, optional The opened physical file associated with the `HDUList` or a bytes object containing the contents of the FITS file. """ if isinstance(file, bytes): self._data = file self._file = None else: self._file = file self._data = None # For internal use only--the keyword args passed to fitsopen / # HDUList.fromfile/string when opening the file self._open_kwargs = {} self._in_read_next_hdu = False # If we have read all the HDUs from the file or not # The assumes that all HDUs have been written when we first opened the # file; we do not currently support loading additional HDUs from a file # while it is being streamed to. In the future that might be supported # but for now this is only used for the purpose of lazy-loading of # existing HDUs. if file is None: self._read_all = True elif self._file is not None: # Should never attempt to read HDUs in ostream mode self._read_all = self._file.mode == "ostream" else: self._read_all = False if hdus is None: hdus = [] # can take one HDU, as well as a list of HDU's as input if isinstance(hdus, _ValidHDU): hdus = [hdus] elif not isinstance(hdus, (HDUList, list)): raise TypeError("Invalid input for HDUList.") for idx, hdu in enumerate(hdus): if not isinstance(hdu, _BaseHDU): raise TypeError(f"Element {idx} in the HDUList input is not an HDU.") super().__init__(hdus) if file is None: # Only do this when initializing from an existing list of HDUs # When initializing from a file, this will be handled by the # append method after the first HDU is read self.update_extend() def __len__(self): if not self._in_read_next_hdu: self.readall() return super().__len__() def __repr__(self): # Special case: if the FITS file is located on a remote file system # and has not been fully read yet, we return a simplified repr to # avoid downloading the entire file. We can tell that a file is remote # from the fact that the ``fsspec`` package was used to open it. is_fsspec_file = self._file and "fsspec" in str( self._file._file.__class__.__bases__ ) if not self._read_all and is_fsspec_file: return f"{type(self)} (partially read)" # In order to correctly repr an HDUList we need to load all the # HDUs as well self.readall() return super().__repr__() def __iter__(self): # While effectively this does the same as: # for idx in range(len(self)): # yield self[idx] # the more complicated structure is here to prevent the use of len(), # which would break the lazy loading for idx in itertools.count(): try: yield self[idx] except IndexError: break def __getitem__(self, key): """ Get an HDU from the `HDUList`, indexed by number or name. """ # If the key is a slice we need to make sure the necessary HDUs # have been loaded before passing the slice on to super. if isinstance(key, slice): max_idx = key.stop # Check for and handle the case when no maximum was # specified (e.g. [1:]). if max_idx is None: # We need all of the HDUs, so load them # and reset the maximum to the actual length. max_idx = len(self) # Just in case the max_idx is negative... 
max_idx = self._positive_index_of(max_idx) number_loaded = super().__len__() if max_idx >= number_loaded: # We need more than we have, try loading up to and including # max_idx. Note we do not try to be clever about skipping HDUs # even though key.step might conceivably allow it. for i in range(number_loaded, max_idx): # Read until max_idx or to the end of the file, whichever # comes first. if not self._read_next_hdu(): break try: hdus = super().__getitem__(key) except IndexError as e: # Raise a more helpful IndexError if the file was not fully read. if self._read_all: raise e else: raise IndexError( "HDU not found, possibly because the index " "is out of range, or because the file was " "closed before all HDUs were read" ) else: return HDUList(hdus) # Originally this used recursion, but hypothetically an HDU with # a very large number of HDUs could blow the stack, so use a loop # instead try: return self._try_while_unread_hdus( super().__getitem__, self._positive_index_of(key) ) except IndexError as e: # Raise a more helpful IndexError if the file was not fully read. if self._read_all: raise e else: raise IndexError( "HDU not found, possibly because the index " "is out of range, or because the file was " "closed before all HDUs were read" ) def __contains__(self, item): """ Returns `True` if ``item`` is an ``HDU`` _in_ ``self`` or a valid extension specification (e.g., integer extension number, extension name, or a tuple of extension name and an extension version) of a ``HDU`` in ``self``. """ try: self._try_while_unread_hdus(self.index_of, item) except (KeyError, ValueError): return False return True def __setitem__(self, key, hdu): """ Set an HDU to the `HDUList`, indexed by number or name. """ _key = self._positive_index_of(key) if isinstance(hdu, (slice, list)): if _is_int(_key): raise ValueError("An element in the HDUList must be an HDU.") for item in hdu: if not isinstance(item, _BaseHDU): raise ValueError(f"{item} is not an HDU.") else: if not isinstance(hdu, _BaseHDU): raise ValueError(f"{hdu} is not an HDU.") try: self._try_while_unread_hdus(super().__setitem__, _key, hdu) except IndexError: raise IndexError(f"Extension {key} is out of bound or not found.") self._resize = True self._truncate = False def __delitem__(self, key): """ Delete an HDU from the `HDUList`, indexed by number or name. """ if isinstance(key, slice): end_index = len(self) else: key = self._positive_index_of(key) end_index = len(self) - 1 self._try_while_unread_hdus(super().__delitem__, key) if key == end_index or (key == -1 and not self._resize): self._truncate = True else: self._truncate = False self._resize = True # Support the 'with' statement def __enter__(self): return self def __exit__(self, type, value, traceback): output_verify = self._open_kwargs.get("output_verify", "exception") self.close(output_verify=output_verify) @classmethod def fromfile( cls, fileobj, mode=None, memmap=None, save_backup=False, cache=True, lazy_load_hdus=True, ignore_missing_simple=False, **kwargs, ): """ Creates an `HDUList` instance from a file-like object. The actual implementation of ``fitsopen()``, and generally shouldn't be used directly. Use :func:`open` instead (and see its documentation for details of the parameters accepted by this method). 
""" return cls._readfrom( fileobj=fileobj, mode=mode, memmap=memmap, save_backup=save_backup, cache=cache, ignore_missing_simple=ignore_missing_simple, lazy_load_hdus=lazy_load_hdus, **kwargs, ) @classmethod def fromstring(cls, data, **kwargs): """ Creates an `HDUList` instance from a string or other in-memory data buffer containing an entire FITS file. Similar to :meth:`HDUList.fromfile`, but does not accept the mode or memmap arguments, as they are only relevant to reading from a file on disk. This is useful for interfacing with other libraries such as CFITSIO, and may also be useful for streaming applications. Parameters ---------- data : str, buffer-like, etc. A string or other memory buffer containing an entire FITS file. Buffer-like objects include :class:`~bytes`, :class:`~bytearray`, :class:`~memoryview`, and :class:`~numpy.ndarray`. It should be noted that if that memory is read-only (such as a Python string) the returned :class:`HDUList`'s data portions will also be read-only. **kwargs : dict Optional keyword arguments. See :func:`astropy.io.fits.open` for details. Returns ------- hdul : HDUList An :class:`HDUList` object representing the in-memory FITS file. """ try: # Test that the given object supports the buffer interface by # ensuring an ndarray can be created from it np.ndarray((), dtype="ubyte", buffer=data) except TypeError: raise TypeError( f"The provided object {data} does not contain an underlying " "memory buffer. fromstring() requires an object that " "supports the buffer interface such as bytes, buffer, " "memoryview, ndarray, etc. This restriction is to ensure " "that efficient access to the array/table data is possible." ) return cls._readfrom(data=data, **kwargs) def fileinfo(self, index): """ Returns a dictionary detailing information about the locations of the indexed HDU within any associated file. The values are only valid after a read or write of the associated file with no intervening changes to the `HDUList`. Parameters ---------- index : int Index of HDU for which info is to be returned. Returns ------- fileinfo : dict or None The dictionary details information about the locations of the indexed HDU within an associated file. Returns `None` when the HDU is not associated with a file. Dictionary contents: ========== ======================================================== Key Value ========== ======================================================== file File object associated with the HDU filename Name of associated file object filemode Mode in which the file was opened (readonly, update, append, denywrite, ostream) resized Flag that when `True` indicates that the data has been resized since the last read/write so the returned values may not be valid. hdrLoc Starting byte location of header in file datLoc Starting byte location of data block in file datSpan Data size including padding ========== ======================================================== """ if self._file is not None: output = self[index].fileinfo() if not output: # OK, the HDU associated with this index is not yet # tied to the file associated with the HDUList. The only way # to get the file object is to check each of the HDU's in the # list until we find the one associated with the file. 
f = None for hdu in self: info = hdu.fileinfo() if info: f = info["file"] fm = info["filemode"] break output = { "file": f, "filemode": fm, "hdrLoc": None, "datLoc": None, "datSpan": None, } output["filename"] = self._file.name output["resized"] = self._wasresized() else: output = None return output def __copy__(self): """ Return a shallow copy of an HDUList. Returns ------- copy : `HDUList` A shallow copy of this `HDUList` object. """ return self[:] # Syntactic sugar for `__copy__()` magic method copy = __copy__ def __deepcopy__(self, memo=None): return HDUList([hdu.copy() for hdu in self]) def pop(self, index=-1): """Remove an item from the list and return it. Parameters ---------- index : int, str, tuple of (string, int), optional An integer value of ``index`` indicates the position from which ``pop()`` removes and returns an HDU. A string value or a tuple of ``(string, int)`` functions as a key for identifying the HDU to be removed and returned. If ``key`` is a tuple, it is of the form ``(key, ver)`` where ``ver`` is an ``EXTVER`` value that must match the HDU being searched for. If the key is ambiguous (e.g. there are multiple 'SCI' extensions) the first match is returned. For a more precise match use the ``(name, ver)`` pair. If even the ``(name, ver)`` pair is ambiguous the numeric index must be used to index the duplicate HDU. Returns ------- hdu : BaseHDU The HDU object at position indicated by ``index`` or having name and version specified by ``index``. """ # Make sure that HDUs are loaded before attempting to pop self.readall() list_index = self.index_of(index) return super().pop(list_index) def insert(self, index, hdu): """ Insert an HDU into the `HDUList` at the given ``index``. Parameters ---------- index : int Index before which to insert the new HDU. hdu : BaseHDU The HDU object to insert """ if not isinstance(hdu, _BaseHDU): raise ValueError(f"{hdu} is not an HDU.") num_hdus = len(self) if index == 0 or num_hdus == 0: if num_hdus != 0: # We are inserting a new Primary HDU so we need to # make the current Primary HDU into an extension HDU. if isinstance(self[0], GroupsHDU): raise ValueError( "The current Primary HDU is a GroupsHDU. " "It can't be made into an extension HDU, " "so another HDU cannot be inserted before it." ) hdu1 = ImageHDU(self[0].data, self[0].header) # Insert it into position 1, then delete HDU at position 0. super().insert(1, hdu1) super().__delitem__(0) if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)): # You passed in an Extension HDU but we need a Primary HDU. # If you provided an ImageHDU then we can convert it to # a primary HDU and use that. if isinstance(hdu, ImageHDU): hdu = PrimaryHDU(hdu.data, hdu.header) else: # You didn't provide an ImageHDU so we create a # simple Primary HDU and append that first before # we append the new Extension HDU. phdu = PrimaryHDU() super().insert(0, phdu) index = 1 else: if isinstance(hdu, GroupsHDU): raise ValueError("A GroupsHDU must be inserted as a Primary HDU.") if isinstance(hdu, PrimaryHDU): # You passed a Primary HDU but we need an Extension HDU # so create an Extension HDU from the input Primary HDU. hdu = ImageHDU(hdu.data, hdu.header) super().insert(index, hdu) hdu._new = True self._resize = True self._truncate = False # make sure the EXTEND keyword is in primary HDU if there is extension self.update_extend() def append(self, hdu): """ Append a new HDU to the `HDUList`. Parameters ---------- hdu : BaseHDU HDU to add to the `HDUList`. 
""" if not isinstance(hdu, _BaseHDU): raise ValueError("HDUList can only append an HDU.") # store BZERO and BSCALE if present bzero = hdu.header.get("BZERO") bscale = hdu.header.get("BSCALE") if len(self) > 0: if isinstance(hdu, GroupsHDU): raise ValueError("Can't append a GroupsHDU to a non-empty HDUList") if isinstance(hdu, PrimaryHDU): # You passed a Primary HDU but we need an Extension HDU # so create an Extension HDU from the input Primary HDU. # TODO: This isn't necessarily sufficient to copy the HDU; # _header_offset and friends need to be copied too. hdu = ImageHDU( hdu.data, hdu.header, do_not_scale_image_data=hdu._do_not_scale_image_data, ) else: if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)): # You passed in an Extension HDU but we need a Primary # HDU. # If you provided an ImageHDU then we can convert it to # a primary HDU and use that. if isinstance(hdu, ImageHDU): hdu = PrimaryHDU( hdu.data, hdu.header, do_not_scale_image_data=hdu._do_not_scale_image_data, ) else: # You didn't provide an ImageHDU so we create a # simple Primary HDU and append that first before # we append the new Extension HDU. phdu = PrimaryHDU() super().append(phdu) # Add back BZERO and BSCALE if relevant if getattr(hdu, "_do_not_scale_image_data", False): if bzero is not None: hdu.header["BZERO"] = bzero if bscale is not None: hdu.header["BSCALE"] = bscale super().append(hdu) hdu._new = True self._resize = True self._truncate = False # make sure the EXTEND keyword is in primary HDU if there is extension self.update_extend() def index_of(self, key): """ Get the index of an HDU from the `HDUList`. Parameters ---------- key : int, str, tuple of (string, int) or BaseHDU The key identifying the HDU. If ``key`` is a tuple, it is of the form ``(name, ver)`` where ``ver`` is an ``EXTVER`` value that must match the HDU being searched for. If the key is ambiguous (e.g. there are multiple 'SCI' extensions) the first match is returned. For a more precise match use the ``(name, ver)`` pair. If even the ``(name, ver)`` pair is ambiguous (it shouldn't be but it's not impossible) the numeric index must be used to index the duplicate HDU. When ``key`` is an HDU object, this function returns the index of that HDU object in the ``HDUList``. Returns ------- index : int The index of the HDU in the `HDUList`. Raises ------ ValueError If ``key`` is an HDU object and it is not found in the ``HDUList``. KeyError If an HDU specified by the ``key`` that is an extension number, extension name, or a tuple of extension name and version is not found in the ``HDUList``. """ if _is_int(key): return key elif isinstance(key, tuple): _key, _ver = key elif isinstance(key, _BaseHDU): return self.index(key) else: _key = key _ver = None if not isinstance(_key, str): raise KeyError( f"{type(self).__name__} indices must be integers, extension " f"names as strings, or (extname, version) tuples; got {_key}" ) _key = (_key.strip()).upper() found = None for idx, hdu in enumerate(self): name = hdu.name if isinstance(name, str): name = name.strip().upper() # 'PRIMARY' should always work as a reference to the first HDU if (name == _key or (_key == "PRIMARY" and idx == 0)) and ( _ver is None or _ver == hdu.ver ): found = idx break if found is None: raise KeyError(f"Extension {key!r} not found.") else: return found def _positive_index_of(self, key): """ Same as index_of, but ensures always returning a positive index or zero. (Really this should be called non_negative_index_of but it felt too long.) 
This means that if the key is a negative integer, we have to convert it to the corresponding positive index. This means knowing the length of the HDUList, which in turn means loading all HDUs. Therefore using negative indices on HDULists is inherently inefficient. """ index = self.index_of(key) if index >= 0: return index if abs(index) > len(self): raise IndexError(f"Extension {index} is out of bound or not found.") return len(self) + index def readall(self): """ Read data of all HDUs into memory. """ while self._read_next_hdu(): pass @ignore_sigint def flush(self, output_verify="fix", verbose=False): """ Force a write of the `HDUList` back to the file (for append and update modes only). Parameters ---------- output_verify : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info. verbose : bool When `True`, print verbose messages """ if self._file.mode not in ("append", "update", "ostream"): warnings.warn( f"Flush for '{self._file.mode}' mode is not supported.", AstropyUserWarning, ) return save_backup = self._open_kwargs.get("save_backup", False) if save_backup and self._file.mode in ("append", "update"): filename = self._file.name if os.path.exists(filename): # The file doesn't actually exist anymore for some reason # then there's no point in trying to make a backup backup = filename + ".bak" idx = 1 while os.path.exists(backup): backup = filename + ".bak." + str(idx) idx += 1 warnings.warn( f"Saving a backup of {filename} to {backup}.", AstropyUserWarning ) try: shutil.copy(filename, backup) except OSError as exc: raise OSError( f"Failed to save backup to destination {filename}" ) from exc self.verify(option=output_verify) if self._file.mode in ("append", "ostream"): for hdu in self: if verbose: try: extver = str(hdu._header["extver"]) except KeyError: extver = "" # only append HDU's which are "new" if hdu._new: hdu._prewriteto() with _free_space_check(self): hdu._writeto(self._file) if verbose: print("append HDU", hdu.name, extver) hdu._new = False hdu._postwriteto() elif self._file.mode == "update": self._flush_update() def update_extend(self): """ Make sure that if the primary header needs the keyword ``EXTEND`` that it has it and it is correct. """ if not len(self): return if not isinstance(self[0], PrimaryHDU): # A PrimaryHDU will be automatically inserted at some point, but it # might not have been added yet return hdr = self[0].header def get_first_ext(): try: return self[1] except IndexError: return None if "EXTEND" in hdr: if not hdr["EXTEND"] and get_first_ext() is not None: hdr["EXTEND"] = True elif get_first_ext() is not None: if hdr["NAXIS"] == 0: hdr.set("EXTEND", True, after="NAXIS") else: n = hdr["NAXIS"] hdr.set("EXTEND", True, after="NAXIS" + str(n)) def writeto( self, fileobj, output_verify="exception", overwrite=False, checksum=False ): """ Write the `HDUList` to a new file. Parameters ---------- fileobj : str, file-like or `pathlib.Path` File to write to. If a file object, must be opened in a writeable mode. output_verify : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info. 
overwrite : bool, optional If ``True``, overwrite the output file if it exists. Raises an ``OSError`` if ``False`` and the output file exists. Default is ``False``. checksum : bool When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the headers of all HDU's written to the file. Notes ----- gzip, zip, bzip2 and lzma compression algorithms are natively supported. Compression mode is determined from the filename extension ('.gz', '.zip', '.bz2' or '.xz' respectively). It is also possible to pass a compressed file object, e.g. `gzip.GzipFile`. """ if len(self) == 0: warnings.warn("There is nothing to write.", AstropyUserWarning) return self.verify(option=output_verify) # make sure the EXTEND keyword is there if there is extension self.update_extend() if fileobj is sys.stdout: # special case stdout for debugging convenience # see https://github.com/astropy/astropy/issues/3427 fileobj = fileobj.buffer # make note of whether the input file object is already open, in which # case we should not close it after writing (that should be the job # of the caller) closed = isinstance(fileobj, str) or fileobj_closed(fileobj) mode = FILE_MODES[fileobj_mode(fileobj)] if isfile(fileobj) else "ostream" # This can accept an open file object that's open to write only, or in # append/update modes but only if the file doesn't exist. fileobj = _File(fileobj, mode=mode, overwrite=overwrite) hdulist = self.fromfile(fileobj) try: dirname = os.path.dirname(hdulist._file.name) except (AttributeError, TypeError): dirname = None try: with _free_space_check(self, dirname=dirname): for hdu in self: hdu._output_checksum = checksum hdu._prewriteto() hdu._writeto(hdulist._file) hdu._postwriteto() finally: hdulist.close(output_verify=output_verify, closed=closed) def close(self, output_verify="exception", verbose=False, closed=True): """ Close the associated FITS file and memmap object, if any. Parameters ---------- output_verify : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info. verbose : bool When `True`, print out verbose messages. closed : bool When `True`, close the underlying file object. """ try: if ( self._file and self._file.mode in ("append", "update") and not self._file.closed ): self.flush(output_verify=output_verify, verbose=verbose) finally: if self._file and closed and hasattr(self._file, "close"): self._file.close() # Give individual HDUs an opportunity to do on-close cleanup for hdu in self: hdu._close(closed=closed) def info(self, output=None): """ Summarize the info of the HDUs in this `HDUList`. Note that this function prints its results to the console---it does not return a value. Parameters ---------- output : file-like or bool, optional A file-like object to write the output to. If `False`, does not output to a file and instead returns a list of tuples representing the HDU info. Writes to ``sys.stdout`` by default. """ if output is None: output = sys.stdout if self._file is None: name = "(No file associated with this HDUList)" else: name = self._file.name results = [ f"Filename: {name}", "No. 
Name Ver Type Cards Dimensions Format", ] format = "{:3d} {:10} {:3} {:11} {:5d} {} {} {}" default = ("", "", "", 0, (), "", "") for idx, hdu in enumerate(self): summary = hdu._summary() if len(summary) < len(default): summary += default[len(summary) :] summary = (idx,) + summary if output: results.append(format.format(*summary)) else: results.append(summary) if output: output.write("\n".join(results)) output.write("\n") output.flush() else: return results[2:] def filename(self): """ Return the file name associated with the HDUList object if one exists. Otherwise returns None. Returns ------- filename : str A string containing the file name associated with the HDUList object if an association exists. Otherwise returns None. """ if self._file is not None: if hasattr(self._file, "name"): return self._file.name return None @classmethod def _readfrom( cls, fileobj=None, data=None, mode=None, memmap=None, cache=True, lazy_load_hdus=True, ignore_missing_simple=False, *, use_fsspec=None, fsspec_kwargs=None, decompress_in_memory=False, **kwargs, ): """ Provides the implementations from HDUList.fromfile and HDUList.fromstring, both of which wrap this method, as their implementations are largely the same. """ if fileobj is not None: if not isinstance(fileobj, _File): # instantiate a FITS file object (ffo) fileobj = _File( fileobj, mode=mode, memmap=memmap, cache=cache, use_fsspec=use_fsspec, fsspec_kwargs=fsspec_kwargs, decompress_in_memory=decompress_in_memory, ) # The Astropy mode is determined by the _File initializer if the # supplied mode was None mode = fileobj.mode hdulist = cls(file=fileobj) else: if mode is None: # The default mode mode = "readonly" hdulist = cls(file=data) # This method is currently only called from HDUList.fromstring and # HDUList.fromfile. If fileobj is None then this must be the # fromstring case; the data type of ``data`` will be checked in the # _BaseHDU.fromstring call. if ( not ignore_missing_simple and hdulist._file and hdulist._file.mode != "ostream" and hdulist._file.size > 0 ): pos = hdulist._file.tell() # FITS signature is supposed to be in the first 30 bytes, but to # allow reading various invalid files we will check in the first # card (80 bytes). simple = hdulist._file.read(80) match_sig = simple[:29] == FITS_SIGNATURE[:-1] and simple[29:30] in ( b"T", b"F", ) if not match_sig: # Check the SIMPLE card is there but not written correctly match_sig_relaxed = re.match(rb"SIMPLE\s*=\s*[T|F]", simple) if match_sig_relaxed: warnings.warn( "Found a SIMPLE card but its format doesn't" " respect the FITS Standard", VerifyWarning, ) else: if hdulist._file.close_on_error: hdulist._file.close() raise OSError( "No SIMPLE card found, this file does not appear to " "be a valid FITS file. 
If this is really a FITS file, " "try with ignore_missing_simple=True" ) hdulist._file.seek(pos) # Store additional keyword args that were passed to fits.open hdulist._open_kwargs = kwargs if fileobj is not None and fileobj.writeonly: # Output stream--not interested in reading/parsing # the HDUs--just writing to the output file return hdulist # Make sure at least the PRIMARY HDU can be read read_one = hdulist._read_next_hdu() # If we're trying to read only and no header units were found, # raise an exception if not read_one and mode in ("readonly", "denywrite"): # Close the file if necessary (issue #6168) if hdulist._file.close_on_error: hdulist._file.close() raise OSError("Empty or corrupt FITS file") if not lazy_load_hdus or kwargs.get("checksum") is True: # Go ahead and load all HDUs while hdulist._read_next_hdu(): pass # initialize/reset attributes to be used in "update/append" mode hdulist._resize = False hdulist._truncate = False return hdulist def _try_while_unread_hdus(self, func, *args, **kwargs): """ Attempt an operation that accesses an HDU by index/name that can fail if not all HDUs have been read yet. Keep reading HDUs until the operation succeeds or there are no more HDUs to read. """ while True: try: return func(*args, **kwargs) except Exception: if self._read_next_hdu(): continue else: raise def _read_next_hdu(self): """ Lazily load a single HDU from the fileobj or data string the `HDUList` was opened from, unless no further HDUs are found. Returns True if a new HDU was loaded, or False otherwise. """ if self._read_all: return False fileobj, data, kwargs = self._file, self._data, self._open_kwargs if fileobj is not None and fileobj.closed: return False try: self._in_read_next_hdu = True # read all HDUs try: if fileobj is not None: try: # Make sure we're back to the end of the last read # HDU if len(self) > 0: last = self[len(self) - 1] if last._data_offset is not None: offset = last._data_offset + last._data_size fileobj.seek(offset, os.SEEK_SET) hdu = _BaseHDU.readfrom(fileobj, **kwargs) except EOFError: self._read_all = True return False except OSError: # Close the file: see # https://github.com/astropy/astropy/issues/6168 # if self._file.close_on_error: self._file.close() if fileobj.writeonly: self._read_all = True return False else: raise else: if not data: self._read_all = True return False hdu = _BaseHDU.fromstring(data, **kwargs) self._data = data[hdu._data_offset + hdu._data_size :] if not kwargs.get("disable_image_compression", False): if isinstance(hdu, BinTableHDU) and CompImageHDU.match_header( hdu.header ): kwargs_comp = { key: val for key, val in kwargs.items() if key in ("scale_back", "uint", "do_not_scale_image_data") } hdu = CompImageHDU(bintable=hdu, **kwargs_comp) super().append(hdu) if len(self) == 1: # Check for an extension HDU and update the EXTEND # keyword of the primary HDU accordingly self.update_extend() hdu._new = False if "checksum" in kwargs: hdu._output_checksum = kwargs["checksum"] # check in the case there is extra space after the last HDU or # corrupted HDU except (VerifyError, ValueError) as exc: warnings.warn( f"Error validating header for HDU #{len(self)} (note: Astropy " f"uses zero-based indexing).\n{indent(str(exc), 4 * ' ')}\n" "There may be extra bytes after the last HDU or the " "file is corrupted.", VerifyWarning, ) del exc self._read_all = True return False finally: self._in_read_next_hdu = False return True def _verify(self, option="warn"): errs = _ErrList([], unit="HDU") # the first (0th) element must be a primary HDU if ( 
len(self) > 0 and (not isinstance(self[0], PrimaryHDU)) and (not isinstance(self[0], _NonstandardHDU)) ): err_text = "HDUList's 0th element is not a primary HDU." fix_text = "Fixed by inserting one as 0th HDU." def fix(self=self): self.insert(0, PrimaryHDU()) err = self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix) errs.append(err) if len(self) > 1 and ( "EXTEND" not in self[0].header or self[0].header["EXTEND"] is not True ): err_text = ( "Primary HDU does not contain an EXTEND keyword " "equal to T even though there are extension HDUs." ) fix_text = "Fixed by inserting or updating the EXTEND keyword." def fix(header=self[0].header): naxis = header["NAXIS"] if naxis == 0: after = "NAXIS" else: after = "NAXIS" + str(naxis) header.set("EXTEND", value=True, after=after) errs.append( self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix) ) # each element calls their own verify for idx, hdu in enumerate(self): if idx > 0 and (not isinstance(hdu, ExtensionHDU)): err_text = f"HDUList's element {idx} is not an extension HDU." err = self.run_option(option, err_text=err_text, fixable=False) errs.append(err) else: result = hdu._verify(option) if result: errs.append(result) return errs def _flush_update(self): """Implements flushing changes to a file in update mode.""" for hdu in self: # Need to all _prewriteto() for each HDU first to determine if # resizing will be necessary hdu._prewriteto(inplace=True) try: self._wasresized() # if the HDUList is resized, need to write out the entire contents of # the hdulist to the file. if self._resize or self._file.compression: self._flush_resize() else: # if not resized, update in place for hdu in self: hdu._writeto(self._file, inplace=True) # reset the modification attributes after updating for hdu in self: hdu._header._modified = False finally: for hdu in self: hdu._postwriteto() def _flush_resize(self): """ Implements flushing changes in update mode when parts of one or more HDU need to be resized. """ old_name = self._file.name old_memmap = self._file.memmap name = _tmp_name(old_name) if not self._file.file_like: old_mode = os.stat(old_name).st_mode # The underlying file is an actual file object. The HDUList is # resized, so we need to write it to a tmp file, delete the # original file, and rename the tmp file to the original file. if self._file.compression == "gzip": new_file = gzip.GzipFile(name, mode="ab+") else: new_file = name with self.fromfile(new_file, mode="append") as hdulist: for hdu in self: hdu._writeto(hdulist._file, inplace=True, copy=True) if sys.platform.startswith("win"): # Collect a list of open mmaps to the data; this well be # used later. See below. mmaps = [ (idx, _get_array_mmap(hdu.data), hdu.data) for idx, hdu in enumerate(self) if hdu._has_data ] hdulist._file.close() self._file.close() if sys.platform.startswith("win"): # Close all open mmaps to the data. This is only necessary on # Windows, which will not allow a file to be renamed or deleted # until all handles to that file have been closed. 
for idx, mmap, arr in mmaps: if mmap is not None: mmap.close() os.remove(self._file.name) # reopen the renamed new file with "update" mode os.rename(name, old_name) os.chmod(old_name, old_mode) if isinstance(new_file, gzip.GzipFile): old_file = gzip.GzipFile(old_name, mode="rb+") else: old_file = old_name ffo = _File(old_file, mode="update", memmap=old_memmap) self._file = ffo for hdu in self: # Need to update the _file attribute and close any open mmaps # on each HDU if hdu._has_data and _get_array_mmap(hdu.data) is not None: del hdu.data hdu._file = ffo if sys.platform.startswith("win"): # On Windows, all the original data mmaps were closed above. # However, it's possible that the user still has references to # the old data which would no longer work (possibly even cause # a segfault if they try to access it). This replaces the # buffers used by the original arrays with the buffers of mmap # arrays created from the new file. This seems to work, but # it's a flaming hack and carries no guarantees that it won't # lead to odd behavior in practice. Better to just not keep # references to data from files that had to be resized upon # flushing (on Windows--again, this is no problem on Linux). for idx, mmap, arr in mmaps: if mmap is None: continue if NUMPY_LT_2_0: # Note that this hack is only possible on numpy 1.x: # in 2.x, we cannot write directly to the data attribute # https://github.com/numpy/numpy/issues/8628 with warnings.catch_warnings(): warnings.simplefilter("ignore", category=DeprecationWarning) arr.data = self[idx].data.data elif sys.getrefcount(arr) > 2: # 2 is the minimum number of references to this object # counting `arr` and a reference as an argument to getrefcount(), # see https://docs.python.org/3/library/sys.html#sys.getrefcount warnings.warn( "Memory map object was closed but appears to still " "be referenced. Further access will result in undefined " "behavior (possibly including segmentation faults).", category=UserWarning, stacklevel=2, ) del mmaps # Just to be sure else: # The underlying file is not a file object, it is a file like # object. We can't write out to a file, we must update the file # like object in place. To do this, we write out to a temporary # file, then delete the contents in our file like object, then # write the contents of the temporary file to the now empty file # like object. self.writeto(name) hdulist = self.fromfile(name) ffo = self._file ffo.truncate(0) ffo.seek(0) for hdu in hdulist: hdu._writeto(ffo, inplace=True, copy=True) # Close the temporary file and delete it. hdulist.close() os.remove(hdulist._file.name) # reset the resize attributes after updating self._resize = False self._truncate = False for hdu in self: hdu._header._modified = False hdu._new = False hdu._file = ffo def _wasresized(self, verbose=False): """ Determine if any changes to the HDUList will require a file resize when flushing the file. Side effect of setting the objects _resize attribute. 
""" if not self._resize: # determine if any of the HDU is resized for hdu in self: # for CompImageHDU, we need to handle things a little differently # because the HDU matching the header/data on disk is hdu._bintable if isinstance(hdu, CompImageHDU): hdu = hdu._bintable if hdu is None: continue # Header: nbytes = len(str(hdu._header)) if nbytes != (hdu._data_offset - hdu._header_offset): self._resize = True self._truncate = False if verbose: print("One or more header is resized.") break # Data: if not hdu._has_data: continue nbytes = hdu.size nbytes = nbytes + _pad_length(nbytes) if nbytes != hdu._data_size: self._resize = True self._truncate = False if verbose: print("One or more data area is resized.") break if self._truncate: try: self._file.truncate(hdu._data_offset + hdu._data_size) except OSError: self._resize = True self._truncate = False return self._resize
HDUList
python
neetcode-gh__leetcode
python/0724-find-pivot-index.py
{ "start": 0, "end": 315 }
class ____: def pivotIndex(self, nums: List[int]) -> int: total = sum(nums) # O(n) leftSum = 0 for i in range(len(nums)): rightSum = total - nums[i] - leftSum if leftSum == rightSum: return i leftSum += nums[i] return -1
Solution
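As a quick illustration of the prefix-sum idea used in the pivot-index solution above, here is a stand-alone sketch; the function name and sample inputs are illustrative assumptions, not part of the dataset row.

# Illustrative stand-alone version of the same prefix-sum approach.
def pivot_index(nums):
    total = sum(nums)                 # one pass for the overall sum
    left = 0
    for i, x in enumerate(nums):
        if left == total - x - left:  # left sum equals right sum at index i
            return i
        left += x
    return -1

assert pivot_index([1, 7, 3, 6, 5, 6]) == 3   # 1 + 7 + 3 == 5 + 6
assert pivot_index([2, 1, -1]) == 0           # right side sums to 0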
python
cython__cython
Cython/Debugger/libpython.py
{ "start": 60798, "end": 61173 }
class ____(gdb.Command): 'Select and print the python stack frame that called this one (if any)' def __init__(self): gdb.Command.__init__ (self, "py-up", gdb.COMMAND_STACK, gdb.COMPLETE_NONE) def invoke(self, args, from_tty): move_in_stack(move_up=True)
PyUp
python
altair-viz__altair
altair/vegalite/v6/schema/_config.py
{ "start": 178129, "end": 178949 }
class ____(TypedDict, total=False): """ :class:`altair.MultiPoint` ``TypedDict`` wrapper. Parameters ---------- coordinates type Specifies the type of GeoJSON object. bbox Bounding box of the coordinate range of the object's Geometries, Features, or Feature Collections. The value of the bbox member is an array of length 2*n where n is the number of dimensions represented in the contained geometries, with all axes of the most southwesterly point followed by all axes of the more northeasterly point. The axes order of a bbox follows the axes order of geometries. https://tools.ietf.org/html/rfc7946#section-5 """ coordinates: Sequence[Sequence[float]] type: Literal["MultiPoint"] bbox: Sequence[float]
MultiPointKwds
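To make the intended shape of this TypedDict concrete, a small self-contained sketch follows; the TypedDict is re-declared only so the snippet runs on its own, and the coordinate values are made-up examples.

from typing import Literal, Sequence, TypedDict

class MultiPointKwds(TypedDict, total=False):
    coordinates: Sequence[Sequence[float]]
    type: Literal["MultiPoint"]
    bbox: Sequence[float]

# A GeoJSON-style MultiPoint with two points and a matching bounding box.
geometry: MultiPointKwds = {
    "type": "MultiPoint",
    "coordinates": [[-122.45, 37.77], [-73.97, 40.78]],
    "bbox": [-122.45, 37.77, -73.97, 40.78],
}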
python
ray-project__ray
python/ray/data/datasource/file_meta_provider.py
{ "start": 5416, "end": 18659 }
class ____(DefaultFileMetadataProvider): """Fast Metadata provider for :class:`~ray.data.datasource.file_based_datasource.FileBasedDatasource` implementations. Offers improved performance vs. :class:`DefaultFileMetadataProvider` by skipping directory path expansion and file size collection. While this performance improvement may be negligible for local filesystems, it can be substantial for cloud storage service providers. This should only be used when all input paths exist and are known to be files. """ def expand_paths( self, paths: List[str], filesystem: "RetryingPyFileSystem", partitioning: Optional[Partitioning] = None, ignore_missing_paths: bool = False, ) -> Iterator[Tuple[str, int]]: if ignore_missing_paths: raise ValueError( "`ignore_missing_paths` cannot be set when used with " "`FastFileMetadataProvider`. All paths must exist when " "using `FastFileMetadataProvider`." ) logger.warning( f"Skipping expansion of {len(paths)} path(s). If your paths contain " f"directories or if file size collection is required, try rerunning this " f"read with `meta_provider=DefaultFileMetadataProvider()`." ) yield from zip(paths, itertools.repeat(None, len(paths))) def _handle_read_os_error(error: OSError, paths: Union[str, List[str]]) -> str: # NOTE: this is not comprehensive yet, and should be extended as more errors arise. # NOTE: The latter patterns are raised in Arrow 10+, while the former is raised in # Arrow < 10. aws_error_pattern = ( r"^(?:(.*)AWS Error \[code \d+\]: No response body\.(.*))|" r"(?:(.*)AWS Error UNKNOWN \(HTTP status 400\) during HeadObject operation: " r"No response body\.(.*))|" r"(?:(.*)AWS Error ACCESS_DENIED during HeadObject operation: No response " r"body\.(.*))$" ) if re.match(aws_error_pattern, str(error)): # Specially handle AWS error when reading files, to give a clearer error # message to avoid confusing users. The real issue is most likely that the AWS # S3 file credentials have not been properly configured yet. if isinstance(paths, str): # Quote to highlight single file path in error message for better # readability. List of file paths will be shown up as ['foo', 'boo'], # so only quote single file path here. paths = f'"{paths}"' raise OSError( ( f"Failing to read AWS S3 file(s): {paths}. " "Please check that file exists and has properly configured access. " "You can also run AWS CLI command to get more detailed error message " "(e.g., aws s3 ls <file-name>). " "See https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/index.html " # noqa "and https://docs.ray.io/en/latest/data/creating-datasets.html#reading-from-remote-storage " # noqa "for more information." ) ) else: raise error def _list_files( paths: List[str], filesystem: "RetryingPyFileSystem", *, partition_filter: Optional[PathPartitionFilter], file_extensions: Optional[List[str]], ) -> List[Tuple[str, int]]: return list( _list_files_internal( paths, filesystem, partition_filter=partition_filter, file_extensions=file_extensions, ) ) def _list_files_internal( paths: List[str], filesystem: "RetryingPyFileSystem", *, partition_filter: Optional[PathPartitionFilter], file_extensions: Optional[List[str]], ) -> Iterator[Tuple[str, int]]: default_meta_provider = DefaultFileMetadataProvider() for path, file_size in default_meta_provider.expand_paths(paths, filesystem): # HACK: PyArrow's `ParquetDataset` errors if input paths contain non-parquet # files. To avoid this, we expand the input paths with the default metadata # provider and then apply the partition filter or file extensions. 
if ( partition_filter and not partition_filter.apply(path) or not _has_file_extension(path, file_extensions) ): continue yield path, file_size def _expand_paths( paths: List[str], filesystem: "RetryingPyFileSystem", partitioning: Optional[Partitioning], ignore_missing_paths: bool = False, ) -> Iterator[Tuple[str, int]]: """Get the file sizes for all provided file paths.""" from pyarrow.fs import LocalFileSystem from ray.data.datasource.file_based_datasource import ( FILE_SIZE_FETCH_PARALLELIZATION_THRESHOLD, ) from ray.data.datasource.path_util import _is_http_url, _unwrap_protocol # We break down our processing paths into a few key cases: # 1. If len(paths) < threshold, fetch the file info for the individual files/paths # serially. # 2. If all paths are contained under the same parent directory (or base directory, # if using partitioning), fetch all file infos at this prefix and filter to the # provided paths on the client; this should be a single file info request. # 3. If more than threshold requests required, parallelize them via Ray tasks. # 1. Small # of paths case. is_local = isinstance(filesystem, LocalFileSystem) if isinstance(filesystem, RetryingPyFileSystem): is_local = isinstance(filesystem.unwrap(), LocalFileSystem) if ( len(paths) < FILE_SIZE_FETCH_PARALLELIZATION_THRESHOLD # Local file systems are very fast to hit. or is_local ): yield from _get_file_infos_serial(paths, filesystem, ignore_missing_paths) else: # 2. Common path prefix case. # Get longest common path of all paths. common_path = os.path.commonpath(paths) # If parent directory (or base directory, if using partitioning) is common to # all paths, fetch all file infos at that prefix and filter the response to the # provided paths. if not _is_http_url(common_path) and ( ( partitioning is not None and common_path == _unwrap_protocol(partitioning.base_dir) ) or all(str(pathlib.Path(path).parent) == common_path for path in paths) ): yield from _get_file_infos_common_path_prefix( paths, common_path, filesystem, ignore_missing_paths ) # 3. Parallelization case. else: # Parallelize requests via Ray tasks. yield from _get_file_infos_parallel(paths, filesystem, ignore_missing_paths) def _get_file_infos_serial( paths: List[str], filesystem: "RetryingPyFileSystem", ignore_missing_paths: bool = False, ) -> Iterator[Tuple[str, int]]: for path in paths: yield from _get_file_infos(path, filesystem, ignore_missing_paths) def _get_file_infos_common_path_prefix( paths: List[str], common_path: str, filesystem: "pyarrow.fs.FileSystem", ignore_missing_paths: bool = False, ) -> Iterator[Tuple[str, int]]: path_to_size = {path: None for path in paths} for path, file_size in _get_file_infos( common_path, filesystem, ignore_missing_paths ): if path in path_to_size: path_to_size[path] = file_size # Check if all `paths` have file size metadata. # If any of paths has no file size, fall back to get files metadata in parallel. # This can happen when path is a directory, but not a file. have_missing_path = False for path in paths: if path_to_size[path] is None: logger.debug( f"Finding path {path} not have file size metadata. " "Fall back to get files metadata in parallel for all paths." ) have_missing_path = True break if have_missing_path: # Parallelize requests via Ray tasks. yield from _get_file_infos_parallel(paths, filesystem, ignore_missing_paths) else: # Iterate over `paths` to yield each path in original order. 
# NOTE: do not iterate over `path_to_size` because the dictionary skips # duplicated path, while `paths` might contain duplicated path if one wants # to read same file multiple times. for path in paths: yield path, path_to_size[path] def _get_file_infos_parallel( paths: List[str], filesystem: "RetryingPyFileSystem", ignore_missing_paths: bool = False, ) -> Iterator[Tuple[str, int]]: from ray.data.datasource.file_based_datasource import ( PATHS_PER_FILE_SIZE_FETCH_TASK, _unwrap_s3_serialization_workaround, _wrap_s3_serialization_workaround, ) logger.warning( f"Expanding {len(paths)} path(s). This may be a HIGH LATENCY " f"operation on some cloud storage services. Moving all the " "paths to a common parent directory will lead to faster " "metadata fetching." ) # Capture the filesystem in the fetcher func closure, but wrap it in our # serialization workaround to make sure that the pickle roundtrip works as expected. filesystem = _wrap_s3_serialization_workaround(filesystem) def _file_infos_fetcher(paths: List[str]) -> List[Tuple[str, int]]: fs = _unwrap_s3_serialization_workaround(filesystem) return list( itertools.chain.from_iterable( _get_file_infos(path, fs, ignore_missing_paths) for path in paths ) ) yield from _fetch_metadata_parallel( paths, _file_infos_fetcher, PATHS_PER_FILE_SIZE_FETCH_TASK ) Uri = TypeVar("Uri") Meta = TypeVar("Meta") def _fetch_metadata_parallel( uris: List[Uri], fetch_func: Callable[[List[Uri]], List[Meta]], desired_uris_per_task: int, **ray_remote_args, ) -> Iterator[Meta]: """Fetch file metadata in parallel using Ray tasks.""" remote_fetch_func = cached_remote_fn(fetch_func) if ray_remote_args: remote_fetch_func = remote_fetch_func.options(**ray_remote_args) # Choose a parallelism that results in a # of metadata fetches per task that # dominates the Ray task overhead while ensuring good parallelism. # Always launch at least 2 parallel fetch tasks. parallelism = max(len(uris) // desired_uris_per_task, 2) metadata_fetch_bar = ProgressBar( "Metadata Fetch Progress", total=parallelism, unit="task" ) fetch_tasks = [] for uri_chunk in np.array_split(uris, parallelism): if len(uri_chunk) == 0: continue fetch_tasks.append(remote_fetch_func.remote(uri_chunk)) results = metadata_fetch_bar.fetch_until_complete(fetch_tasks) yield from itertools.chain.from_iterable(results) def _get_file_infos( path: str, filesystem: "RetryingPyFileSystem", ignore_missing_path: bool = False ) -> List[Tuple[str, int]]: """Get the file info for all files at or under the provided path.""" from pyarrow.fs import FileType file_infos = [] try: file_info = filesystem.get_file_info(path) except OSError as e: _handle_read_os_error(e, path) if file_info.type == FileType.Directory: for file_path, file_size in _expand_directory(path, filesystem): file_infos.append((file_path, file_size)) elif file_info.type == FileType.File: file_infos.append((path, file_info.size)) elif file_info.type == FileType.NotFound and ignore_missing_path: pass else: raise FileNotFoundError(path) return file_infos def _expand_directory( path: str, filesystem: "RetryingPyFileSystem", exclude_prefixes: Optional[List[str]] = None, ignore_missing_path: bool = False, ) -> List[Tuple[str, int]]: """ Expand the provided directory path to a list of file paths. Args: path: The directory path to expand. filesystem: The filesystem implementation that should be used for reading these files. exclude_prefixes: The file relative path prefixes that should be excluded from the returned file set. Default excluded prefixes are "." and "_". 
Returns: An iterator of (file_path, file_size) tuples. """ if exclude_prefixes is None: exclude_prefixes = [".", "_"] from pyarrow.fs import FileSelector selector = FileSelector(path, recursive=True, allow_not_found=ignore_missing_path) files = filesystem.get_file_info(selector) base_path = selector.base_dir out = [] for file_ in files: if not file_.is_file: continue file_path = file_.path if not file_path.startswith(base_path): continue relative = file_path[len(base_path) :] if any(relative.startswith(prefix) for prefix in exclude_prefixes): continue out.append((file_path, file_.size)) # We sort the paths to guarantee a stable order. return sorted(out)
FastFileMetadataProvider
python
qdrant__qdrant-client
qdrant_client/http/api/indexes_api.py
{ "start": 3400, "end": 4424 }
class ____(_IndexesApi): async def create_field_index( self, collection_name: str, wait: bool = None, ordering: WriteOrdering = None, create_field_index: m.CreateFieldIndex = None, ) -> m.InlineResponse2005: """ Create index for field in collection """ return await self._build_for_create_field_index( collection_name=collection_name, wait=wait, ordering=ordering, create_field_index=create_field_index, ) async def delete_field_index( self, collection_name: str, field_name: str, wait: bool = None, ordering: WriteOrdering = None, ) -> m.InlineResponse2005: """ Delete field index for collection """ return await self._build_for_delete_field_index( collection_name=collection_name, field_name=field_name, wait=wait, ordering=ordering, )
AsyncIndexesApi
python
django__django
tests/template_tests/filter_tests/test_yesno.py
{ "start": 117, "end": 355 }
class ____(SimpleTestCase): @setup({"t": '{{ var|yesno:"yup,nup,mup" }} {{ var|yesno }}'}) def test_true(self): output = self.engine.render_to_string("t", {"var": True}) self.assertEqual(output, "yup yes")
YesNoTests
python
conda__conda
conda/common/_logic.py
{ "start": 5143, "end": 6059 }
class ____(_SatSolver): def setup(self, m, limit=0, **kwargs): from pycosat import itersolve # NOTE: The iterative solving isn't actually used here, we just call # itersolve to separate setup from the actual run. return itersolve(self._clauses.as_list(), vars=m, prop_limit=limit) # If we add support for passing the clauses as an integer stream to the # solvers, we could also use self._clauses.as_array like this: # return itersolve(self._clauses.as_array(), vars=m, prop_limit=limit) def invoke(self, iter_sol): try: sat_solution = next(iter_sol) except StopIteration: sat_solution = "UNSAT" del iter_sol return sat_solution def process_solution(self, sat_solution): if sat_solution in ("UNSAT", "UNKNOWN"): return None return sat_solution
_PycoSatSolver
python
pytest-dev__pytest
src/_pytest/main.py
{ "start": 16101, "end": 17846 }
class ____(nodes.Directory): """Collector of files in a file system directory. .. versionadded:: 8.0 .. note:: Python directories with an `__init__.py` file are instead collected by :class:`~pytest.Package` by default. Both are :class:`~pytest.Directory` collectors. """ @classmethod def from_parent( # type: ignore[override] cls, parent: nodes.Collector, *, path: Path, ) -> Self: """The public constructor. :param parent: The parent collector of this Dir. :param path: The directory's path. :type path: pathlib.Path """ return super().from_parent(parent=parent, path=path) def collect(self) -> Iterable[nodes.Item | nodes.Collector]: config = self.config col: nodes.Collector | None cols: Sequence[nodes.Collector] ihook = self.ihook for direntry in scandir(self.path): if direntry.is_dir(): path = Path(direntry.path) if not self.session.isinitpath(path, with_parents=True): if ihook.pytest_ignore_collect(collection_path=path, config=config): continue col = ihook.pytest_collect_directory(path=path, parent=self) if col is not None: yield col elif direntry.is_file(): path = Path(direntry.path) if not self.session.isinitpath(path): if ihook.pytest_ignore_collect(collection_path=path, config=config): continue cols = ihook.pytest_collect_file(file_path=path, parent=self) yield from cols @final
Dir
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/assets/graph/asset_graph_differ.py
{ "start": 1090, "end": 1223 }
class ____(Generic[T]): added_keys: Set[T] changed_keys: Set[T] removed_keys: Set[T] @whitelist_for_serdes @record
DictDiff
python
sqlalchemy__sqlalchemy
test/base/test_utils.py
{ "start": 1475, "end": 2248 }
class ____(fixtures.TestBase): @testing.requires.predictable_gc def test_cleanout_elements(self): class Foo: pass f1, f2, f3 = Foo(), Foo(), Foo() w = WeakSequence([f1, f2, f3]) eq_(len(w), 3) eq_(len(w._storage), 3) del f2 gc_collect() eq_(len(w), 2) eq_(len(w._storage), 2) @testing.requires.predictable_gc def test_cleanout_appended(self): class Foo: pass f1, f2, f3 = Foo(), Foo(), Foo() w = WeakSequence() w.append(f1) w.append(f2) w.append(f3) eq_(len(w), 3) eq_(len(w._storage), 3) del f2 gc_collect() eq_(len(w), 2) eq_(len(w._storage), 2)
WeakSequenceTest
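The behaviour these tests rely on — entries disappearing once their last strong reference is gone — can be sketched with the standard-library weakref module alone; the class and variable names below are arbitrary.

import gc
import weakref

class Foo:
    pass

f = Foo()
ref = weakref.ref(f)
assert ref() is f       # referent is still alive
del f
gc.collect()            # force collection (CPython usually frees it immediately)
assert ref() is None    # the weak reference no longer resolves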
python
ansible__ansible
test/lib/ansible_test/_internal/processes.py
{ "start": 405, "end": 2231 }
class ____: """A process in the process tree.""" pid: int command: str parent: Process | None = None children: tuple[Process, ...] = dataclasses.field(default_factory=tuple) @property def args(self) -> list[str]: """The list of arguments that make up `command`.""" return shlex.split(self.command) @property def path(self) -> pathlib.Path: """The path to the process.""" return pathlib.Path(self.args[0]) def get_process_data(pids: list[int] | None = None) -> list[ProcessData]: """Return a list of running processes.""" if pids: args = ['-p', ','.join(map(str, pids))] else: args = ['-A'] lines = raw_command(['ps'] + args + ['-o', 'pid,ppid,command'], capture=True)[0].splitlines()[1:] processes = [ProcessData(pid=int(pid), ppid=int(ppid), command=command) for pid, ppid, command in (line.split(maxsplit=2) for line in lines)] return processes def get_process_tree() -> dict[int, Process]: """Return the process tree.""" processes = get_process_data() pid_to_process: dict[int, Process] = {} pid_to_children: dict[int, list[Process]] = collections.defaultdict(list) for data in processes: pid_to_process[data.pid] = process = Process(pid=data.pid, command=data.command) if data.ppid: pid_to_children[data.ppid].append(process) for data in processes: pid_to_process[data.pid] = dataclasses.replace( pid_to_process[data.pid], parent=pid_to_process.get(data.ppid), children=tuple(pid_to_children[data.pid]), ) return pid_to_process def get_current_process() -> Process: """Return the current process along with its ancestors and descendants.""" return get_process_tree()[os.getpid()]
Process
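A brief sketch of the shlex-based splitting that the args and path properties above build on; the command string is an arbitrary example.

import pathlib
import shlex

command = "/usr/bin/python3 -m pytest -k 'smoke or fast'"
args = shlex.split(command)       # ['/usr/bin/python3', '-m', 'pytest', '-k', 'smoke or fast']
path = pathlib.Path(args[0])      # PosixPath('/usr/bin/python3')
assert path.name == "python3"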
python
doocs__leetcode
solution/1600-1699/1636.Sort Array by Increasing Frequency/Solution.py
{ "start": 0, "end": 159 }
class ____: def frequencySort(self, nums: List[int]) -> List[int]: cnt = Counter(nums) return sorted(nums, key=lambda x: (cnt[x], -x))
Solution
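A stand-alone sketch of the same sort key — ascending frequency with ties broken by descending value — using invented sample inputs.

from collections import Counter

def frequency_sort(nums):
    cnt = Counter(nums)
    # Sort by (frequency ascending, value descending).
    return sorted(nums, key=lambda x: (cnt[x], -x))

assert frequency_sort([1, 1, 2, 2, 2, 3]) == [3, 1, 1, 2, 2, 2]
assert frequency_sort([2, 3, 1, 3, 2]) == [1, 3, 3, 2, 2]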
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_image53.py
{ "start": 315, "end": 862 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("image53.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with image(s).""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.insert_image("E9", self.image_dir + "red.png", {"description": ""}) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
huggingface__transformers
src/transformers/models/parakeet/modeling_parakeet.py
{ "start": 2095, "end": 2198 }
class ____(BaseModelOutput): attention_mask: Optional[torch.Tensor] = None
ParakeetEncoderModelOutput
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 849136, "end": 849526 }
class ____(sgqlc.types.Type): """An edge in a connection.""" __schema__ = github_schema __field_names__ = ("cursor", "node") cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor") """A cursor for use in pagination.""" node = sgqlc.types.Field("ProjectV2Item", graphql_name="node") """The item at the end of the edge."""
ProjectV2ItemEdge
python
Unity-Technologies__ml-agents
ml-agents/mlagents/trainers/trainer_controller.py
{ "start": 1028, "end": 11391 }
class ____: def __init__( self, trainer_factory: TrainerFactory, output_path: str, run_id: str, param_manager: EnvironmentParameterManager, train: bool, training_seed: int, ): """ :param output_path: Path to save the model. :param summaries_dir: Folder to save training summaries. :param run_id: The sub-directory name for model and summary statistics :param param_manager: EnvironmentParameterManager object which stores information about all environment parameters. :param train: Whether to train model, or only run inference. :param training_seed: Seed to use for Numpy and Torch random number generation. :param threaded: Whether or not to run trainers in a separate thread. Disable for testing/debugging. """ self.trainers: Dict[str, Trainer] = {} self.brain_name_to_identifier: Dict[str, Set] = defaultdict(set) self.trainer_factory = trainer_factory self.output_path = output_path self.logger = get_logger(__name__) self.run_id = run_id self.train_model = train self.param_manager = param_manager self.ghost_controller = self.trainer_factory.ghost_controller self.registered_behavior_ids: Set[str] = set() self.trainer_threads: List[threading.Thread] = [] self.kill_trainers = False np.random.seed(training_seed) torch_utils.torch.manual_seed(training_seed) self.rank = get_rank() @timed def _save_models(self): """ Saves current model to checkpoint folder. """ if self.rank is not None and self.rank != 0: return for brain_name in self.trainers.keys(): self.trainers[brain_name].save_model() self.logger.debug("Saved Model") @staticmethod def _create_output_path(output_path): try: if not os.path.exists(output_path): os.makedirs(output_path) except Exception: raise UnityEnvironmentException( f"The folder {output_path} containing the " "generated model could not be " "accessed. Please make sure the " "permissions are set correctly." ) @timed def _reset_env(self, env_manager: EnvManager) -> None: """Resets the environment. Returns: A Data structure corresponding to the initial reset state of the environment. """ new_config = self.param_manager.get_current_samplers() env_manager.reset(config=new_config) # Register any new behavior ids that were generated on the reset. 
self._register_new_behaviors(env_manager, env_manager.first_step_infos) def _not_done_training(self) -> bool: return ( any(t.should_still_train for t in self.trainers.values()) or not self.train_model ) or len(self.trainers) == 0 def _create_trainer_and_manager( self, env_manager: EnvManager, name_behavior_id: str ) -> None: parsed_behavior_id = BehaviorIdentifiers.from_name_behavior_id(name_behavior_id) brain_name = parsed_behavior_id.brain_name trainerthread = None if brain_name in self.trainers: trainer = self.trainers[brain_name] else: trainer = self.trainer_factory.generate(brain_name) self.trainers[brain_name] = trainer if trainer.threaded: # Only create trainer thread for new trainers trainerthread = threading.Thread( target=self.trainer_update_func, args=(trainer,), daemon=True ) self.trainer_threads.append(trainerthread) env_manager.on_training_started( brain_name, self.trainer_factory.trainer_config[brain_name] ) policy = trainer.create_policy( parsed_behavior_id, env_manager.training_behaviors[name_behavior_id], ) trainer.add_policy(parsed_behavior_id, policy) agent_manager = AgentManager( policy, name_behavior_id, trainer.stats_reporter, trainer.parameters.time_horizon, threaded=trainer.threaded, ) env_manager.set_agent_manager(name_behavior_id, agent_manager) env_manager.set_policy(name_behavior_id, policy) self.brain_name_to_identifier[brain_name].add(name_behavior_id) trainer.publish_policy_queue(agent_manager.policy_queue) trainer.subscribe_trajectory_queue(agent_manager.trajectory_queue) # Only start new trainers if trainerthread is not None: trainerthread.start() def _create_trainers_and_managers( self, env_manager: EnvManager, behavior_ids: Set[str] ) -> None: for behavior_id in behavior_ids: self._create_trainer_and_manager(env_manager, behavior_id) @timed def start_learning(self, env_manager: EnvManager) -> None: self._create_output_path(self.output_path) try: # Initial reset self._reset_env(env_manager) self.param_manager.log_current_lesson() while self._not_done_training(): n_steps = self.advance(env_manager) for _ in range(n_steps): self.reset_env_if_ready(env_manager) # Stop advancing trainers self.join_threads() except ( KeyboardInterrupt, UnityCommunicationException, UnityEnvironmentException, UnityCommunicatorStoppedException, ) as ex: self.join_threads() self.logger.info( "Learning was interrupted. Please wait while the graph is generated." ) if isinstance(ex, KeyboardInterrupt) or isinstance( ex, UnityCommunicatorStoppedException ): pass else: # If the environment failed, we want to make sure to raise # the exception so we exit the process with an return code of 1. raise ex finally: if self.train_model: self._save_models() def end_trainer_episodes(self) -> None: # Reward buffers reset takes place only for curriculum learning # else no reset. for trainer in self.trainers.values(): trainer.end_episode() def reset_env_if_ready(self, env: EnvManager) -> None: # Get the sizes of the reward buffers. reward_buff = {k: list(t.reward_buffer) for (k, t) in self.trainers.items()} curr_step = {k: int(t.get_step) for (k, t) in self.trainers.items()} max_step = {k: int(t.get_max_steps) for (k, t) in self.trainers.items()} # Attempt to increment the lessons of the brains who # were ready. 
updated, param_must_reset = self.param_manager.update_lessons( curr_step, max_step, reward_buff ) if updated: for trainer in self.trainers.values(): trainer.reward_buffer.clear() # If ghost trainer swapped teams ghost_controller_reset = self.ghost_controller.should_reset() if param_must_reset or ghost_controller_reset: self._reset_env(env) # This reset also sends the new config to env self.end_trainer_episodes() elif updated: env.set_env_parameters(self.param_manager.get_current_samplers()) @timed def advance(self, env_manager: EnvManager) -> int: # Get steps with hierarchical_timer("env_step"): new_step_infos = env_manager.get_steps() self._register_new_behaviors(env_manager, new_step_infos) num_steps = env_manager.process_steps(new_step_infos) # Report current lesson for each environment parameter for ( param_name, lesson_number, ) in self.param_manager.get_current_lesson_number().items(): for trainer in self.trainers.values(): trainer.stats_reporter.set_stat( f"Environment/Lesson Number/{param_name}", lesson_number ) for trainer in self.trainers.values(): if not trainer.threaded: with hierarchical_timer("trainer_advance"): trainer.advance() return num_steps def _register_new_behaviors( self, env_manager: EnvManager, step_infos: List[EnvironmentStep] ) -> None: """ Handle registration (adding trainers and managers) of new behaviors ids. :param env_manager: :param step_infos: :return: """ step_behavior_ids: Set[str] = set() for s in step_infos: step_behavior_ids |= set(s.name_behavior_ids) new_behavior_ids = step_behavior_ids - self.registered_behavior_ids self._create_trainers_and_managers(env_manager, new_behavior_ids) self.registered_behavior_ids |= step_behavior_ids def join_threads(self, timeout_seconds: float = 1.0) -> None: """ Wait for threads to finish, and merge their timer information into the main thread. :param timeout_seconds: :return: """ self.kill_trainers = True for t in self.trainer_threads: try: t.join(timeout_seconds) except Exception: pass with hierarchical_timer("trainer_threads") as main_timer_node: for trainer_thread in self.trainer_threads: thread_timer_stack = get_timer_stack_for_thread(trainer_thread) if thread_timer_stack: main_timer_node.merge( thread_timer_stack.root, root_name="thread_root", is_parallel=True, ) merge_gauges(thread_timer_stack.gauges) def trainer_update_func(self, trainer: Trainer) -> None: while not self.kill_trainers: with hierarchical_timer("trainer_advance"): trainer.advance()
TrainerController
python
falconry__falcon
falcon/errors.py
{ "start": 17894, "end": 19998 }
class ____(HTTPNotFound): """404 Not Found. The request did not match any routes configured for the application. This subclass of :class:`~.HTTPNotFound` is raised by the framework to provide a default 404 response when no route matches the request. This behavior can be customized by registering a custom error handler for :class:`~.HTTPRouteNotFound`. All the arguments are defined as keyword-only. Keyword Args: title (str): Human-friendly error title. If not provided, and `description` is also not provided, no body will be included in the response. description (str): Human-friendly description of the error, along with a helpful suggestion or two (default ``None``). headers (dict or list): A ``dict`` of header names and values to set, or a ``list`` of (*name*, *value*) tuples. Both *name* and *value* must be of type ``str`` or ``StringType``, and only character values 0x00 through 0xFF may be used on platforms that use wide characters. Note: The Content-Type header, if present, will be overridden. If you wish to return custom error messages, you can create your own HTTP error class, and install an error handler to convert it into an appropriate HTTP response for the client Note: Falcon can process a list of ``tuple`` slightly faster than a ``dict``. href (str): A URL someone can visit to find out more information (default ``None``). Unicode characters are percent-encoded. href_text (str): If href is given, use this as the friendly title/description for the link (default 'API documentation for this error'). code (int): An internal code that customers can reference in their support request or to help them when searching for knowledge base articles related to this error (default ``None``). """
HTTPRouteNotFound
python
pytorch__pytorch
test/distributed/elastic/test_control_plane.py
{ "start": 1449, "end": 10266 }
class ____(TestCase): def test_worker_server(self) -> None: with local_worker_server() as pool: resp = pool.request("GET", "/") self.assertEqual(resp.status, 200) self.assertEqual( resp.data, b"<h1>torch.distributed.WorkerServer</h1>\n" b'<a href="' b"/handler/" b'">Handler names</a>\n', ) resp = pool.request("POST", "/handler/ping") self.assertEqual(resp.status, 200) self.assertEqual(resp.data, b"pong") resp = pool.request("GET", "/handler/") self.assertEqual(resp.status, 200) self.assertIn("ping", json.loads(resp.data)) resp = pool.request("POST", "/handler/nonexistent") self.assertEqual(resp.status, 404) self.assertIn(b"Handler nonexistent not found:", resp.data) @requires_cuda def test_dump_nccl_trace_pickle(self) -> None: with local_worker_server() as pool: resp = pool.request("POST", "/handler/dump_nccl_trace_pickle") self.assertEqual(resp.status, 200) out = pickle.loads(resp.data) self.assertIsInstance(out, dict) self.assertIn("version", out) @requires_cuda def test_dump_nccl_trace_pickle_with_params(self) -> None: with local_worker_server() as pool: # bad key - not lower case resp = pool.request( "POST", "/handler/dump_nccl_trace_pickle?includeCollectives=true" ) self.assertEqual(resp.status, 400) # unknown key resp = pool.request( "POST", "/handler/dump_nccl_trace_pickle?unknownkey=true" ) self.assertEqual(resp.status, 400) # bad value - not a bool resp = pool.request( "POST", "/handler/dump_nccl_trace_pickle?includecollectives=notabool" ) self.assertEqual(resp.status, 400) # bad value - value not lowercase resp = pool.request( "POST", "/handler/dump_nccl_trace_pickle?includecollectives=True" ) self.assertEqual(resp.status, 400) # good key and value resp = pool.request( "POST", "/handler/dump_nccl_trace_pickle?includecollectives=true" ) self.assertEqual(resp.status, 200) # multiple good keys and values resp = pool.request( "POST", "/handler/dump_nccl_trace_pickle?includecollectives=true&includestacktraces=false&onlyactive=true", ) self.assertEqual(resp.status, 200) @requires_cuda def test_dump_nccl_trace_pickle_with_json(self) -> None: with local_worker_server() as pool: # bad key - not lower case resp = pool.request( "POST", "/handler/dump_nccl_trace_json?includeCollectives=true" ) self.assertEqual(resp.status, 400) # unknown key resp = pool.request("POST", "/handler/dump_nccl_trace_json?unknownkey=true") self.assertEqual(resp.status, 400) # bad value - not a bool resp = pool.request( "POST", "/handler/dump_nccl_trace_json?includecollectives=notabool" ) self.assertEqual(resp.status, 400) # bad value - value not lowercase resp = pool.request( "POST", "/handler/dump_nccl_trace_json?includecollectives=True" ) self.assertEqual(resp.status, 400) # good key and value resp = pool.request( "POST", "/handler/dump_nccl_trace_json?includecollectives=true" ) self.assertEqual(resp.status, 200) # multiple good keys and values resp = pool.request( "POST", "/handler/dump_nccl_trace_json?includecollectives=true&onlyactive=true", ) self.assertEqual(resp.status, 200) def test_tcp(self) -> None: import requests from torch._C._distributed_c10d import _WorkerServer server = _WorkerServer("", 1234) out = requests.get("http://localhost:1234/handler/") self.assertEqual(out.status_code, 200) server.shutdown() def test_dump_traceback(self) -> None: with local_worker_server() as pool: resp = pool.request("POST", "/handler/dump_traceback") self.assertEqual(resp.status, 200) self.assertIn(b"in test_dump_traceback\n", resp.data) def test_run_handler(self) -> None: from torch._C._distributed_c10d import 
_get_handler, _Request, _Response handler = _get_handler("ping") class Request(_Request): def __init__(self) -> None: _Request.__init__(self) def body(self) -> bytes: return b"dummy" def params(self) -> dict[str, str]: return {} class Response(_Response): def __init__(self) -> None: _Response.__init__(self) def set_content(self, content: str, content_type: str) -> None: self.content = content self.content_type = content_type def set_status(self, status: int) -> None: self.status = status req = Request() resp = Response() handler(req, resp) self.assertEqual(resp.status, 200) self.assertEqual(resp.content, "pong") self.assertEqual(resp.content_type, "text/plain") def test_get_handler_nonexistant(self) -> None: from torch._C._distributed_c10d import _get_handler with self.assertRaisesRegex(ValueError, "Failed to find handler nonexistent"): _get_handler("nonexistent") def test_get_handler_names(self) -> None: from torch._C._distributed_c10d import _get_handler_names names = _get_handler_names() self.assertIn("ping", names) @unittest.skipIf(IS_FBCODE, "disabled in FBCODE") def test_wait_counter_values(self) -> None: """ Test that WaitCounter values are properly tracked and returned by the handler. Note: This test may trigger an ASAN heap-use-after-free error during process shutdown due to static destruction order issues with boost regex in the logging framework. The test assertions pass successfully before this shutdown error occurs. """ with local_worker_server() as pool: # Create and use a WaitCounter with a specific name counter_name = "test_counter" counter = _WaitCounter(counter_name) # Use the counter multiple times to generate metrics # Note: Using minimal/no sleep to avoid timing issues for i in range(3): with counter.guard(): pass # Minimal work # Query the wait counter values resp = pool.request("POST", "/handler/wait_counter_values") self.assertEqual(resp.status, 200) # Parse the JSON response data = json.loads(resp.data) # Should be a dictionary self.assertIsInstance(data, dict) # Verify our test counter appears in the response self.assertIn( counter_name, data, f"Counter '{counter_name}' not found in response. Available counters: {list(data.keys())}", ) # Verify the counter has expected metrics counter_data = data[counter_name] self.assertIn("active_count", counter_data) self.assertIn("total_calls", counter_data) self.assertIn("total_time_us", counter_data) self.assertIn("max_time_us", counter_data) # Verify the counter was called 3 times self.assertEqual( counter_data["total_calls"], 3, f"Expected 3 calls, got {counter_data['total_calls']}", ) # Verify active_count is 0 (no active waiters) self.assertEqual( counter_data["active_count"], 0, f"Expected 0 active, got {counter_data['active_count']}", ) # total_time_us and max_time_us may be 0 or very small for fast operations # Just verify they exist and are non-negative self.assertGreaterEqual(counter_data["total_time_us"], 0) self.assertGreaterEqual(counter_data["max_time_us"], 0) if __name__ == "__main__": run_tests()
WorkerServerTest
python
scipy__scipy
scipy/sparse/tests/test_base.py
{ "start": 195152, "end": 195885 }
class ____(_MatrixMixin, BaseTestCOO, sparse_test_class(getset=False, slicing=False, slicing_assign=False, fancy_indexing=False, fancy_assign=False)): spcreator = coo_matrix TestCOO.init_class() TestCOOMatrix.init_class() def test_sparray_subscriptable(): result = coo_array[np.int8, tuple[int]] assert isinstance(result, GenericAlias) assert result.__origin__ is coo_array assert result.__args__ == (np.int8, tuple[int]) result = coo_array[np.int8] assert isinstance(result, GenericAlias) assert result.__origin__ is coo_array assert result.__args__ == (np.int8,)
TestCOOMatrix
python
redis__redis-py
tests/test_asyncio/conftest.py
{ "start": 8362, "end": 9302 }
class ____: def __init__(self, async_generator): self.gen = async_generator async def __aenter__(self): try: return await self.gen.__anext__() except StopAsyncIteration as err: raise RuntimeError("Pickles") from err async def __aexit__(self, exc_type, exc_inst, tb): if exc_type: await self.gen.athrow(exc_type, exc_inst, tb) return True try: await self.gen.__anext__() except StopAsyncIteration: return raise RuntimeError("More pickles") def asynccontextmanager(func): return _asynccontextmanager(func) # helpers to get the connection arguments for this run @pytest.fixture() def redis_url(request): return request.config.getoption("--redis-url") @pytest.fixture() def connect_args(request): url = request.config.getoption("--redis-url") return parse_url(url)
AsyncContextManager
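The same pattern — turning an async generator into an async context manager — can be sketched with only the standard library; everything below (the function name, the dummy resource) is illustrative.

import asyncio
import contextlib

@contextlib.asynccontextmanager
async def managed_resource():
    resource = {"ready": True}     # setup before the yield
    try:
        yield resource
    finally:
        resource["ready"] = False  # teardown after the block exits

async def main():
    async with managed_resource() as res:
        assert res["ready"]

asyncio.run(main())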
python
sympy__sympy
sympy/functions/special/bessel.py
{ "start": 47858, "end": 53311 }
class ____(AiryBase): r""" The Airy function $\operatorname{Bi}$ of the second kind. Explanation =========== The Airy function $\operatorname{Bi}(z)$ is defined to be the function satisfying Airy's differential equation .. math:: \frac{\mathrm{d}^2 w(z)}{\mathrm{d}z^2} - z w(z) = 0. Equivalently, for real $z$ .. math:: \operatorname{Bi}(z) := \frac{1}{\pi} \int_0^\infty \exp\left(-\frac{t^3}{3} + z t\right) + \sin\left(\frac{t^3}{3} + z t\right) \mathrm{d}t. Examples ======== Create an Airy function object: >>> from sympy import airybi >>> from sympy.abc import z >>> airybi(z) airybi(z) Several special values are known: >>> airybi(0) 3**(5/6)/(3*gamma(2/3)) >>> from sympy import oo >>> airybi(oo) oo >>> airybi(-oo) 0 The Airy function obeys the mirror symmetry: >>> from sympy import conjugate >>> conjugate(airybi(z)) airybi(conjugate(z)) Differentiation with respect to $z$ is supported: >>> from sympy import diff >>> diff(airybi(z), z) airybiprime(z) >>> diff(airybi(z), z, 2) z*airybi(z) Series expansion is also supported: >>> from sympy import series >>> series(airybi(z), z, 0, 3) 3**(1/3)*gamma(1/3)/(2*pi) + 3**(2/3)*z*gamma(2/3)/(2*pi) + O(z**3) We can numerically evaluate the Airy function to arbitrary precision on the whole complex plane: >>> airybi(-2).evalf(50) -0.41230258795639848808323405461146104203453483447240 Rewrite $\operatorname{Bi}(z)$ in terms of hypergeometric functions: >>> from sympy import hyper >>> airybi(z).rewrite(hyper) 3**(1/6)*z*hyper((), (4/3,), z**3/9)/gamma(1/3) + 3**(5/6)*hyper((), (2/3,), z**3/9)/(3*gamma(2/3)) See Also ======== airyai: Airy function of the first kind. airyaiprime: Derivative of the Airy function of the first kind. airybiprime: Derivative of the Airy function of the second kind. References ========== .. [1] https://en.wikipedia.org/wiki/Airy_function .. [2] https://dlmf.nist.gov/9 .. [3] https://encyclopediaofmath.org/wiki/Airy_functions .. 
[4] https://mathworld.wolfram.com/AiryFunctions.html """ nargs = 1 unbranched = True @classmethod def eval(cls, arg): if arg.is_Number: if arg is S.NaN: return S.NaN elif arg is S.Infinity: return S.Infinity elif arg is S.NegativeInfinity: return S.Zero elif arg.is_zero: return S.One / (3**Rational(1, 6) * gamma(Rational(2, 3))) if arg.is_zero: return S.One / (3**Rational(1, 6) * gamma(Rational(2, 3))) def fdiff(self, argindex=1): if argindex == 1: return airybiprime(self.args[0]) else: raise ArgumentIndexError(self, argindex) @staticmethod @cacheit def taylor_term(n, x, *previous_terms): if n < 0: return S.Zero else: x = sympify(x) if len(previous_terms) > 1: p = previous_terms[-1] return (cbrt(3)*x * Abs(sin(Rational(2, 3)*pi*(n + S.One))) * factorial((n - S.One)/S(3)) / ((n + S.One) * Abs(cos(Rational(2, 3)*pi*(n + S.Half))) * factorial((n - 2)/S(3))) * p) else: return (S.One/(root(3, 6)*pi) * gamma((n + S.One)/S(3)) * Abs(sin(Rational(2, 3)*pi*(n + S.One))) / factorial(n) * (cbrt(3)*x)**n) def _eval_rewrite_as_besselj(self, z, **kwargs): ot = Rational(1, 3) tt = Rational(2, 3) a = Pow(-z, Rational(3, 2)) if re(z).is_negative: return sqrt(-z/3) * (besselj(-ot, tt*a) - besselj(ot, tt*a)) def _eval_rewrite_as_besseli(self, z, **kwargs): ot = Rational(1, 3) tt = Rational(2, 3) a = Pow(z, Rational(3, 2)) if re(z).is_positive: return sqrt(z)/sqrt(3) * (besseli(-ot, tt*a) + besseli(ot, tt*a)) else: b = Pow(a, ot) c = Pow(a, -ot) return sqrt(ot)*(b*besseli(-ot, tt*a) + z*c*besseli(ot, tt*a)) def _eval_rewrite_as_hyper(self, z, **kwargs): pf1 = S.One / (root(3, 6)*gamma(Rational(2, 3))) pf2 = z*root(3, 6) / gamma(Rational(1, 3)) return pf1 * hyper([], [Rational(2, 3)], z**3/9) + pf2 * hyper([], [Rational(4, 3)], z**3/9) def _eval_expand_func(self, **hints): arg = self.args[0] symbs = arg.free_symbols if len(symbs) == 1: z = symbs.pop() c = Wild("c", exclude=[z]) d = Wild("d", exclude=[z]) m = Wild("m", exclude=[z]) n = Wild("n", exclude=[z]) M = arg.match(c*(d*z**n)**m) if M is not None: m = M[m] # The transformation is given by 03.06.16.0001.01 # https://functions.wolfram.com/Bessel-TypeFunctions/AiryBi/16/01/01/0001/ if (3*m).is_integer: c = M[c] d = M[d] n = M[n] pf = (d * z**n)**m / (d**m * z**(m*n)) newarg = c * d**m * z**(m*n) return S.Half * (sqrt(3)*(S.One - pf)*airyai(newarg) + (S.One + pf)*airybi(newarg))
airybi
python
google__pytype
pytype/overlays/special_builtins.py
{ "start": 5505, "end": 6285 }
class ____(BuiltinFunction): """The base class for builtin predicates of the form f(obj, ...) -> bool. Subclasses should implement run() for a specific signature. (See UnaryPredicate and BinaryPredicate for examples.) """ def run(self, node, args, result): raise NotImplementedError(self.__class__.__name__) def call(self, node, func, args, alias_map=None): try: self.match_args(node, args) node = self.ctx.connect_new_cfg_node(node, f"CallPredicate:{self.name}") result = self.ctx.program.NewVariable() self.run(node, args, result) except error_types.InvalidParameters as ex: self.ctx.errorlog.invalid_function_call(self.ctx.vm.frames, ex) result = self.ctx.new_unsolvable(node) return node, result
ObjectPredicate
python
eventlet__eventlet
eventlet/queue.py
{ "start": 18082, "end": 18394 }
class ____(Queue): '''A subclass of :class:`Queue` that retrieves most recently added entries first.''' def _init(self, maxsize): self.queue = [] def _put(self, item): self.queue.append(item) self._put_bookkeeping() def _get(self): return self.queue.pop()
LifoQueue
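The last-in, first-out retrieval described in the docstring can be seen with the standard-library queue.LifoQueue, used here purely as an analogue of the class above.

from queue import LifoQueue

q = LifoQueue()
for item in ("first", "second", "third"):
    q.put(item)

assert q.get() == "third"    # most recently added entry comes out first
assert q.get() == "second"
assert q.get() == "first"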
python
astropy__astropy
astropy/utils/iers/tests/test_leap_second.py
{ "start": 8332, "end": 9027 }
class ____: def setup_class(cls): # Need auto_download so that IERS_B won't be loaded and cause tests to # fail. iers.conf.auto_download = True def teardown_class(cls): # This setting is to be consistent with astropy/conftest.py iers.conf.auto_download = False # In these tests, the results may be cached. # This is fine - no need to download again. def test_iers_url(self): ls = iers.LeapSeconds.auto_open([iers.IERS_LEAP_SECOND_URL]) assert ls.expires > Time.now() def test_ietf_url(self): ls = iers.LeapSeconds.auto_open([iers.IETF_LEAP_SECOND_URL]) assert ls.expires > Time.now()
TestRemoteURLs