language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
sympy__sympy
sympy/integrals/manualintegrate.py
{ "start": 7577, "end": 7722 }
class ____(TrigRule): """integrate(sec(x)**2, x) -> tan(x)""" def eval(self) -> Expr: return tan(self.variable) @dataclass
Sec2Rule
python
davidhalter__jedi
test/completion/descriptors.py
{ "start": 915, "end": 1148 }
class ____(): @property def r(self): return 1 @r.setter def r(self, value): return '' def t(self): return '' p = property(t) #? [] B().r(). #? int() B().r #? str() B().p #? [] B().p().
B
python
PyCQA__pylint
pylint/checkers/spelling.py
{ "start": 3316, "end": 3624 }
class ____(RegExFilter): r"""Filter skipping over camelCasedWords. This filter skips any words matching the following regular expression: ^([a-z]\w+[A-Z]+\w+) That is, any words that are camelCasedWords. """ _pattern = re.compile(r"^([a-z]+(\d|[A-Z])(?:\w+)?)")
CamelCasedWord
python
sqlalchemy__sqlalchemy
examples/versioned_rows/versioned_map.py
{ "start": 6674, "end": 9194 }
class ____(Base): """Represent an individual key/value pair at a given point in time. ConfigValue is immutable. """ __tablename__ = "config_value" id = Column(Integer, primary_key=True) name = Column(String(50), nullable=False) originating_config_id = Column( Integer, ForeignKey("config.id"), nullable=False ) int_value = Column(Integer) string_value = Column(String(255)) def __init__(self, name, value): self.name = name self.value = value originating_config = relationship("ConfigData") """Reference to the originating ConfigData. This is optional, and allows history tracking of individual values. """ def new_version(self, session): raise NotImplementedError("ConfigValue is immutable.") @property def value(self): for k in ("int_value", "string_value"): v = getattr(self, k) if v is not None: return v else: return None @value.setter def value(self, value): if isinstance(value, int): self.int_value = value self.string_value = None else: self.string_value = str(value) self.int_value = None if __name__ == "__main__": engine = create_engine("sqlite://", echo=True) Base.metadata.create_all(engine) Session = sessionmaker(engine) sess = Session() config = ConfigData( {"user_name": "twitter", "hash_id": "4fedffca37eaf", "x": 27, "y": 450} ) sess.add(config) sess.commit() version_one = config.id config.data["user_name"] = "yahoo" sess.commit() version_two = config.id assert version_one != version_two # two versions have been created. 
assert config.data == { "user_name": "yahoo", "hash_id": "4fedffca37eaf", "x": 27, "y": 450, } old_config = sess.query(ConfigData).get(version_one) assert old_config.data == { "user_name": "twitter", "hash_id": "4fedffca37eaf", "x": 27, "y": 450, } # the history of any key can be acquired using # the originating_config_id attribute history = ( sess.query(ConfigValue) .filter(ConfigValue.name == "user_name") .order_by(ConfigValue.originating_config_id) .all() ) assert [(h.value, h.originating_config_id) for h in history] == ( [("twitter", version_one), ("yahoo", version_two)] )
ConfigValue
python
wandb__wandb
wandb/sdk/artifacts/_generated/delete_registry_members.py
{ "start": 272, "end": 376 }
class ____(GQLResult): success: bool DeleteRegistryMembers.model_rebuild()
DeleteRegistryMembersResult
python
pyinstaller__pyinstaller
tests/functional/specs/several-scripts/main-script1.py
{ "start": 44, "end": 366 }
class ____(basemod.Popen): def __init__(self, *args, **kw): print(inspect.getfile(self.__init__)) print(inspect.getfile(super().__init__)) super().__init__(*args, **kw) # Reduce recursion limit to shorten the traceback. sys.setrecursionlimit(50) basemod.Popen = _Popen p = basemod.Popen()
_Popen
python
kamyu104__LeetCode-Solutions
Python/count-vowels-permutation.py
{ "start": 51, "end": 955 }
class ____(object): def countVowelPermutation(self, n): """ :type n: int :rtype: int """ def matrix_expo(A, K): result = [[int(i==j) for j in xrange(len(A))] \ for i in xrange(len(A))] while K: if K % 2: result = matrix_mult(result, A) A = matrix_mult(A, A) K /= 2 return result def matrix_mult(A, B): ZB = zip(*B) return [[sum(a*b for a, b in itertools.izip(row, col)) % MOD \ for col in ZB] for row in A] MOD = 10**9 + 7 T = [[0, 1, 1, 0, 1], [1, 0, 1, 0, 0], [0, 1, 0, 1, 0], [0, 0, 1, 0, 0], [0, 0, 1, 1, 0]] return sum(map(sum, matrix_expo(T, n-1))) % MOD # Time: O(n) # Space: O(1)
Solution
python
pydantic__pydantic
pydantic-core/python/pydantic_core/core_schema.py
{ "start": 52506, "end": 58003 }
class ____(TypedDict, total=False): type: Required[Literal['list']] items_schema: CoreSchema min_length: int max_length: int fail_fast: bool strict: bool ref: str metadata: dict[str, Any] serialization: IncExSeqOrElseSerSchema def list_schema( items_schema: CoreSchema | None = None, *, min_length: int | None = None, max_length: int | None = None, fail_fast: bool | None = None, strict: bool | None = None, ref: str | None = None, metadata: dict[str, Any] | None = None, serialization: IncExSeqOrElseSerSchema | None = None, ) -> ListSchema: """ Returns a schema that matches a list value, e.g.: ```py from pydantic_core import SchemaValidator, core_schema schema = core_schema.list_schema(core_schema.int_schema(), min_length=0, max_length=10) v = SchemaValidator(schema) assert v.validate_python(['4']) == [4] ``` Args: items_schema: The value must be a list of items that match this schema min_length: The value must be a list with at least this many items max_length: The value must be a list with at most this many items fail_fast: Stop validation on the first error strict: The value must be a list with exactly this many items ref: optional unique identifier of the schema, used to reference the schema in other places metadata: Any other information you want to include with the schema, not used by pydantic-core serialization: Custom serialization schema """ return _dict_not_none( type='list', items_schema=items_schema, min_length=min_length, max_length=max_length, fail_fast=fail_fast, strict=strict, ref=ref, metadata=metadata, serialization=serialization, ) # @deprecated('tuple_positional_schema is deprecated. 
Use pydantic_core.core_schema.tuple_schema instead.') def tuple_positional_schema( items_schema: list[CoreSchema], *, extras_schema: CoreSchema | None = None, strict: bool | None = None, ref: str | None = None, metadata: dict[str, Any] | None = None, serialization: IncExSeqOrElseSerSchema | None = None, ) -> TupleSchema: """ Returns a schema that matches a tuple of schemas, e.g.: ```py from pydantic_core import SchemaValidator, core_schema schema = core_schema.tuple_positional_schema( [core_schema.int_schema(), core_schema.str_schema()] ) v = SchemaValidator(schema) assert v.validate_python((1, 'hello')) == (1, 'hello') ``` Args: items_schema: The value must be a tuple with items that match these schemas extras_schema: The value must be a tuple with items that match this schema This was inspired by JSON schema's `prefixItems` and `items` fields. In python's `typing.Tuple`, you can't specify a type for "extra" items -- they must all be the same type if the length is variable. So this field won't be set from a `typing.Tuple` annotation on a pydantic model. strict: The value must be a tuple with exactly this many items ref: optional unique identifier of the schema, used to reference the schema in other places metadata: Any other information you want to include with the schema, not used by pydantic-core serialization: Custom serialization schema """ if extras_schema is not None: variadic_item_index = len(items_schema) items_schema = items_schema + [extras_schema] else: variadic_item_index = None return tuple_schema( items_schema=items_schema, variadic_item_index=variadic_item_index, strict=strict, ref=ref, metadata=metadata, serialization=serialization, ) # @deprecated('tuple_variable_schema is deprecated. 
Use pydantic_core.core_schema.tuple_schema instead.') def tuple_variable_schema( items_schema: CoreSchema | None = None, *, min_length: int | None = None, max_length: int | None = None, strict: bool | None = None, ref: str | None = None, metadata: dict[str, Any] | None = None, serialization: IncExSeqOrElseSerSchema | None = None, ) -> TupleSchema: """ Returns a schema that matches a tuple of a given schema, e.g.: ```py from pydantic_core import SchemaValidator, core_schema schema = core_schema.tuple_variable_schema( items_schema=core_schema.int_schema(), min_length=0, max_length=10 ) v = SchemaValidator(schema) assert v.validate_python(('1', 2, 3)) == (1, 2, 3) ``` Args: items_schema: The value must be a tuple with items that match this schema min_length: The value must be a tuple with at least this many items max_length: The value must be a tuple with at most this many items strict: The value must be a tuple with exactly this many items ref: Optional unique identifier of the schema, used to reference the schema in other places metadata: Any other information you want to include with the schema, not used by pydantic-core serialization: Custom serialization schema """ return tuple_schema( items_schema=[items_schema or any_schema()], variadic_item_index=0, min_length=min_length, max_length=max_length, strict=strict, ref=ref, metadata=metadata, serialization=serialization, )
ListSchema
python
MongoEngine__mongoengine
tests/all_warnings/test_warnings.py
{ "start": 273, "end": 1214 }
class ____(unittest.TestCase): def setUp(self): connect(db="mongoenginetest") self.warning_list = [] self.showwarning_default = warnings.showwarning warnings.showwarning = self.append_to_warning_list def append_to_warning_list(self, message, category, *args): self.warning_list.append({"message": message, "category": category}) def tearDown(self): # restore default handling of warnings warnings.showwarning = self.showwarning_default def test_document_collection_syntax_warning(self): class NonAbstractBase(Document): meta = {"allow_inheritance": True} class InheritedDocumentFailTest(NonAbstractBase): meta = {"collection": "fail"} warning = self.warning_list[0] assert SyntaxWarning == warning["category"] assert "non_abstract_base" == InheritedDocumentFailTest._get_collection_name()
TestAllWarnings
python
numpy__numpy
numpy/distutils/system_info.py
{ "start": 23403, "end": 23695 }
class ____(BlasNotFoundError): """ Blas (http://www.netlib.org/blas/) sources not found. Directories to search for the sources can be specified in the numpy/distutils/site.cfg file (section [blas_src]) or by setting the BLAS_SRC environment variable."""
BlasSrcNotFoundError
python
django__django
django/template/smartif.py
{ "start": 4551, "end": 6426 }
class ____: error_class = ValueError def __init__(self, tokens): # Turn 'is','not' and 'not','in' into single tokens. num_tokens = len(tokens) mapped_tokens = [] i = 0 while i < num_tokens: token = tokens[i] if token == "is" and i + 1 < num_tokens and tokens[i + 1] == "not": token = "is not" i += 1 # skip 'not' elif token == "not" and i + 1 < num_tokens and tokens[i + 1] == "in": token = "not in" i += 1 # skip 'in' mapped_tokens.append(self.translate_token(token)) i += 1 self.tokens = mapped_tokens self.pos = 0 self.current_token = self.next_token() def translate_token(self, token): try: op = OPERATORS[token] except (KeyError, TypeError): return self.create_var(token) else: return op() def next_token(self): if self.pos >= len(self.tokens): return EndToken else: retval = self.tokens[self.pos] self.pos += 1 return retval def parse(self): retval = self.expression() # Check that we have exhausted all the tokens if self.current_token is not EndToken: raise self.error_class( "Unused '%s' at end of if expression." % self.current_token.display() ) return retval def expression(self, rbp=0): t = self.current_token self.current_token = self.next_token() left = t.nud(self) while rbp < self.current_token.lbp: t = self.current_token self.current_token = self.next_token() left = t.led(left, self) return left def create_var(self, value): return Literal(value)
IfParser
python
h5py__h5py
h5py/tests/test_dataset_getitem.py
{ "start": 4955, "end": 7245 }
class ____(TestCase): def setUp(self): TestCase.setUp(self) self.data = np.array((42.5, -118, "Hello"), dtype=[('a', 'f'), ('b', 'i'), ('c', '|S10')]) self.dset = self.f.create_dataset('x', data=self.data) def test_ndim(self): """ Verify number of dimensions """ self.assertEqual(self.dset.ndim, 0) def test_shape(self): """ Verify shape """ self.assertEqual(self.dset.shape, tuple()) def test_size(self): """ Verify size """ self.assertEqual(self.dset.size, 1) def test_nbytes(self): """ Verify nbytes """ self.assertEqual(self.dset.nbytes, self.data.dtype.itemsize) def test_ellipsis(self): """ Ellipsis -> scalar ndarray """ out = self.dset[...] # assertArrayEqual doesn't work with compounds; do manually self.assertIsInstance(out, np.ndarray) self.assertEqual(out.shape, self.data.shape) self.assertEqual(out.dtype, self.data.dtype) def test_tuple(self): """ () -> np.void instance """ out = self.dset[()] self.assertIsInstance(out, np.void) self.assertEqual(out.dtype, self.data.dtype) def test_slice(self): """ slice -> ValueError """ with self.assertRaises(ValueError): self.dset[0:4] def test_multi_block_slice(self): """ MultiBlockSlice -> ValueError """ with self.assertRaises(ValueError): self.dset[h5py.MultiBlockSlice()] def test_index(self): """ index -> ValueError """ with self.assertRaises(ValueError): self.dset[0] # FIXME: NumPy has IndexError instead def test_indexlist(self): """ index list -> ValueError """ with self.assertRaises(ValueError): self.dset[[1,2,5]] # FIXME: NumPy permits this def test_mask(self): """ mask -> ValueError """ mask = np.array(True, dtype='bool') with self.assertRaises(ValueError): self.dset[mask] # FIXME: NumPy returns a scalar ndarray def test_fieldnames(self): """ field name -> bare value """ out = self.dset['a'] self.assertIsInstance(out, np.float32) self.assertEqual(out, self.dset['a'])
TestScalarCompound
python
getsentry__sentry
src/sentry/issues/issue_occurrence.py
{ "start": 573, "end": 1222 }
class ____(TypedDict): id: str project_id: int event_id: str fingerprint: Sequence[str] issue_title: str subtitle: str resource_id: str | None evidence_data: Mapping[str, Any] evidence_display: Sequence[IssueEvidenceData] type: int detection_time: float level: str | None culprit: str | None assignee: NotRequired[str | None] priority: NotRequired[int | None] """ Who to assign the issue to when creating a new issue. Has no effect on existing issues. In the format of an Actor identifier, as defined in `Actor.from_identifier` """ @dataclass(frozen=True)
IssueOccurrenceData
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_embed_image13.py
{ "start": 315, "end": 1477 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("embed_image13.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with image(s).""" workbook = Workbook(self.got_filename) worksheet1 = workbook.add_worksheet() worksheet1.embed_image(0, 0, self.image_dir + "red.png") worksheet1.embed_image(2, 0, self.image_dir + "blue.png") worksheet1.embed_image(4, 0, self.image_dir + "yellow.png") worksheet2 = workbook.add_worksheet() worksheet2.embed_image(0, 0, self.image_dir + "yellow.png") worksheet2.embed_image(2, 0, self.image_dir + "red.png") worksheet2.embed_image(4, 0, self.image_dir + "blue.png") worksheet3 = workbook.add_worksheet() worksheet3.embed_image(0, 0, self.image_dir + "blue.png") worksheet3.embed_image(2, 0, self.image_dir + "yellow.png") worksheet3.embed_image(4, 0, self.image_dir + "red.png") workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
ray-project__ray
python/ray/dag/tests/experimental/test_torch_tensor_transport.py
{ "start": 11872, "end": 14598 }
class ____: """Tests worker to worker tensor transport with CPU device.""" def test_src_cpu_tensor_dst_cpu_node(self, ray_start_regular): sender = Actor.remote() receiver = Actor.remote() ref = run_worker_to_worker_dag(sender, receiver, "cpu", "cpu") assert ray.get(ref) == "cpu" @pytest.mark.skipif(not USE_GPU, reason="Test requires GPU") def test_src_cpu_tensor_dst_gpu_node(self, ray_start_regular): sender = Actor.remote() receiver = Actor.options(num_gpus=1).remote() ref = run_worker_to_worker_dag(sender, receiver, "cpu", "cpu") assert ray.get(ref) == "cpu" @pytest.mark.skipif(not USE_GPU, reason="Test requires GPU") def test_src_gpu_tensor_dst_cpu_node(self, ray_start_regular): sender = Actor.options(num_gpus=1).remote() receiver = Actor.remote() ref = run_worker_to_worker_dag(sender, receiver, "cpu", "cpu") assert ray.get(ref) == "cpu" @pytest.mark.skipif(not USE_GPU, reason="Test requires GPU") @pytest.mark.parametrize("ray_start_regular", [{"num_cpus": 2}], indirect=True) def test_src_gpu_tensor_dst_gpu_node(self, ray_start_regular): sender = Actor.options(num_gpus=1).remote() receiver = Actor.options(num_gpus=1).remote() with pytest.raises( ValueError, match="accelerator transport is not supported with CPU target device.", ): run_worker_to_worker_dag(sender, receiver, "cpu", "cpu") @pytest.mark.skipif(not USE_GPU, reason="Test requires GPU") def test_src_mix_tensors_dst_cpu_node(self, ray_start_regular): sender = Actor.options(num_gpus=1).remote() receiver = Actor.options().remote() ref = run_worker_to_worker_dag( sender, receiver, "cpu", {"cpu_tensor": "cpu", "gpu_tensor": "cuda"}, is_dict=True, ) assert ray.get(ref) == {"cpu_tensor": "cpu", "gpu_tensor": "cpu"} @pytest.mark.skipif(not USE_GPU, reason="Test requires GPU") @pytest.mark.parametrize("ray_start_regular", [{"num_cpus": 2}], indirect=True) def test_src_mix_tensors_dst_gpu_node(self, ray_start_regular): sender = Actor.options(num_gpus=1).remote() receiver = Actor.options(num_gpus=1).remote() with 
pytest.raises( ValueError, match="accelerator transport is not supported with CPU target device.", ): run_worker_to_worker_dag( sender, receiver, "cpu", {"cpu_tensor": "cpu", "gpu_tensor": "cuda"}, is_dict=True, )
TestWorkerToWorkerDeviceCPU
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_hyperlink28.py
{ "start": 315, "end": 1670 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("hyperlink28.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with hyperlinks.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() cell_format = workbook.add_format({"hyperlink": True}) worksheet.write_url("A1", "http://www.perl.org/", cell_format) workbook.close() self.assertExcelEqual() def test_create_file_with_workbook_format(self): """Test the creation of a simple XlsxWriter file with hyperlinks.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() cell_format = workbook.get_default_url_format() worksheet.write_url("A1", "http://www.perl.org/", cell_format) workbook.close() self.assertExcelEqual() def test_create_file_with_default_format(self): """Test the creation of a simple XlsxWriter file with hyperlinks.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.write_url("A1", "http://www.perl.org/") workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
python-openxml__python-docx
tests/oxml/test__init__.py
{ "start": 282, "end": 1195 }
class ____: def it_returns_an_lxml_element_with_matching_tag_name(self): element = OxmlElement("a:foo") assert isinstance(element, etree._Element) assert element.tag == ("{http://schemas.openxmlformats.org/drawingml/2006/main}foo") def it_adds_supplied_attributes(self): element = OxmlElement("a:foo", {"a": "b", "c": "d"}) assert etree.tostring(element) == ( '<a:foo xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" a="b" c="d"/>' ).encode("utf-8") def it_adds_additional_namespace_declarations_when_supplied(self): ns1 = "http://schemas.openxmlformats.org/drawingml/2006/main" ns2 = "other" element = OxmlElement("a:foo", nsdecls={"a": ns1, "x": ns2}) assert len(element.nsmap.items()) == 2 assert element.nsmap["a"] == ns1 assert element.nsmap["x"] == ns2
DescribeOxmlElement
python
Pylons__pyramid
src/pyramid/httpexceptions.py
{ "start": 36544, "end": 37068 }
class ____(HTTPServerError): """ subclass of :class:`~HTTPServerError` This indicates that the server, while acting as a gateway or proxy, did not receive a timely response from the upstream server specified by the URI (e.g. HTTP, FTP, LDAP) or some other auxiliary server (e.g. DNS) it needed to access in attempting to complete the request. code: 504, title: Gateway Timeout """ code = 504 title = 'Gateway Timeout' explanation = 'The gateway has timed out.'
HTTPGatewayTimeout
python
django__django
tests/backends/test_ddl_references.py
{ "start": 3035, "end": 3774 }
class ____(ColumnsTests): def setUp(self): def create_index_name(table_name, column_names, suffix): return ", ".join( "%s_%s_%s" % (table_name, column_name, suffix) for column_name in column_names ) self.reference = IndexName( "table", ["first_column", "second_column"], "suffix", create_index_name ) def test_repr(self): self.assertEqual( repr(self.reference), "<IndexName 'table_first_column_suffix, table_second_column_suffix'>", ) def test_str(self): self.assertEqual( str(self.reference), "table_first_column_suffix, table_second_column_suffix" )
IndexNameTests
python
dask__distributed
distributed/comm/tcp.py
{ "start": 4873, "end": 14755 }
class ____(Comm): """ An established communication based on an underlying Tornado IOStream. """ max_shard_size: ClassVar[int] = dask.utils.parse_bytes( dask.config.get("distributed.comm.shard") ) stream: IOStream | None def __init__( self, stream: IOStream, local_addr: str, peer_addr: str, deserialize: bool = True, ): self._closed = False super().__init__(deserialize=deserialize) self._local_addr = local_addr self._peer_addr = peer_addr self.stream = stream self._finalizer = weakref.finalize(self, self._get_finalizer()) self._finalizer.atexit = False self._extra: dict = {} ref = weakref.ref(self) stream.set_close_callback(functools.partial(_close_comm, ref)) stream.set_nodelay(True) set_tcp_timeout(stream) self._read_extra() def _read_extra(self): pass def _get_finalizer(self): r = repr(self) def finalize(stream=self.stream, r=r): # stream is None if a StreamClosedError is raised during interpreter # shutdown if stream is not None and not stream.closed(): logger.warning(f"Closing dangling stream in {r}") stream.close() return finalize @property def local_address(self) -> str: return self._local_addr @property def peer_address(self) -> str: return self._peer_addr async def read(self, deserializers=None): stream = self.stream if stream is None: raise CommClosedError() fmt = "Q" fmt_size = struct.calcsize(fmt) try: # Don't store multiple numpy or parquet buffers into the same buffer, or # none will be released until all are released. 
frames_nosplit_nbytes_bin = await stream.read_bytes(fmt_size) (frames_nosplit_nbytes,) = struct.unpack(fmt, frames_nosplit_nbytes_bin) frames_nosplit = await read_bytes_rw(stream, frames_nosplit_nbytes) frames, buffers_nbytes = unpack_frames(frames_nosplit, partial=True) for buffer_nbytes in buffers_nbytes: buffer = await read_bytes_rw(stream, buffer_nbytes) frames.append(buffer) except (StreamClosedError, SSLError) as e: self.stream = None self._closed = True convert_stream_closed_error(self, e) except BaseException: # Some OSError, CancelledError or another "low-level" exception. # We do not really know what was already read from the underlying # socket, so it is not even safe to retry here using the same stream. # The only safe thing to do is to abort. # (See also GitHub #4133, #6548). self.abort() raise else: try: msg = await from_frames( frames, deserialize=self.deserialize, deserializers=deserializers, allow_offload=self.allow_offload, ) except EOFError: # Frames possibly garbled or truncated by communication error self.abort() raise CommClosedError("aborted stream on truncated data") return msg async def write(self, msg, serializers=None, on_error="message"): stream = self.stream if stream is None: raise CommClosedError() frames = await to_frames( msg, allow_offload=self.allow_offload, serializers=serializers, on_error=on_error, context={ "sender": self.local_info, "recipient": self.remote_info, **self.handshake_options, }, frame_split_size=self.max_shard_size, ) frames, frames_nbytes, frames_nbytes_total = _add_frames_header(frames) try: # trick to enqueue all frames for writing beforehand for each_frame_nbytes, each_frame in zip(frames_nbytes, frames): if each_frame_nbytes: # Make sure that `len(data) == data.nbytes` # See <https://github.com/tornadoweb/tornado/pull/2996> each_frame = ensure_memoryview(each_frame) for i, j in sliding_window( 2, range( 0, each_frame_nbytes + C_INT_MAX, C_INT_MAX, ), ): chunk = each_frame[i:j] chunk_nbytes = chunk.nbytes if 
stream._write_buffer is None: raise StreamClosedError() stream._write_buffer.append(chunk) stream._total_write_index += chunk_nbytes # start writing frames stream.write(b"") except StreamClosedError as e: self.stream = None self._closed = True convert_stream_closed_error(self, e) except BaseException: # Some OSError or a another "low-level" exception. We do not really know # what was already written to the underlying socket, so it is not even safe # to retry here using the same stream. The only safe thing to do is to # abort. (See also GitHub #4133). # In case of, for instance, KeyboardInterrupts or other # BaseExceptions that could be handled further upstream, we equally # want to discard this comm self.abort() raise return frames_nbytes_total @gen.coroutine def close(self): # We use gen.coroutine here rather than async def to avoid errors like # Task was destroyed but it is pending! # Triggered by distributed.deploy.tests.test_local::test_silent_startup stream, self.stream = self.stream, None self._closed = True if stream is not None and not stream.closed(): try: # Flush the stream's write buffer by waiting for a last write. if stream.writing(): yield stream.write(b"") stream.socket.shutdown(socket.SHUT_RDWR) except OSError: pass finally: self._finalizer.detach() stream.close() def abort(self) -> None: stream, self.stream = self.stream, None self._closed = True if stream is not None and not stream.closed(): self._finalizer.detach() stream.close() def closed(self) -> bool: return self._closed @property def extra_info(self): return self._extra async def read_bytes_rw(stream: IOStream, n: int) -> memoryview: """Read n bytes from stream. Unlike stream.read_bytes, allow for very large messages and return a writeable buffer. 
""" buf = host_array(n) for i, j in sliding_window( 2, range(0, n + C_INT_MAX, C_INT_MAX), ): chunk = buf[i:j] actual = await stream.read_into(chunk) # type: ignore[arg-type] assert actual == chunk.nbytes return buf def _add_frames_header( frames: list[bytes | memoryview], ) -> tuple[list[bytes | memoryview], list[int], int]: """ """ frames_nbytes = [nbytes(f) for f in frames] frames_nbytes_total = sum(frames_nbytes) # Calculate the number of bytes that are inclusive of: # - prelude # - msgpack header # - simple pickle bytes # - compressed buffers # - first uncompressed buffer (possibly sharded), IFF the pickle bytes are # negligible in size # # All these can be fetched by read() into a single buffer with a single call to # Tornado, because they will be dereferenced soon after they are deserialized. # Read uncompressed numpy/parquet buffers, which will survive indefinitely past # the end of read(), into their own host arrays so that their memory can be # released independently. frames_nbytes_nosplit = 0 first_uncompressed_buffer: object = None for frame, nb in zip(frames, frames_nbytes): buffer = frame.obj if isinstance(frame, memoryview) else frame if not isinstance(buffer, bytes): # Uncompressed buffer; it will be referenced by the unpickled object if first_uncompressed_buffer is None: if frames_nbytes_nosplit > max(2048, nb * 0.05): # Don't extend the lifespan of non-trivial amounts of pickled bytes # to that of the buffers break first_uncompressed_buffer = buffer elif first_uncompressed_buffer is not buffer: # don't split sharded frame # Always store 2+ separate numpy/parquet objects onto separate # buffers break frames_nbytes_nosplit += nb header = pack_frames_prelude(frames) header = struct.pack("Q", nbytes(header) + frames_nbytes_nosplit) + header header_nbytes = nbytes(header) frames = [header, *frames] frames_nbytes = [header_nbytes, *frames_nbytes] frames_nbytes_total += header_nbytes if frames_nbytes_total < 2**17 or ( # 128 kiB total frames_nbytes_total 
< 2**25 # 32 MiB total and frames_nbytes_total // len(frames) < 2**15 # 32 kiB mean ): # very small or very fragmented; send in one go frames = [b"".join(frames)] frames_nbytes = [frames_nbytes_total] return frames, frames_nbytes, frames_nbytes_total
TCP
python
tensorflow__tensorflow
tensorflow/python/util/nest_test.py
{ "start": 1935, "end": 2146 }
class ____(collections.abc.Sequence): def __len__(self): return 1 def __getitem__(self, item): raise ValueError("Cannot get item: %s" % item) @dataclasses.dataclass
_CustomSequenceThatRaisesException
python
Farama-Foundation__Gymnasium
gymnasium/wrappers/vector/vectorize_reward.py
{ "start": 3094, "end": 4304 }
class ____(VectorizeTransformReward): """A wrapper that clips the rewards for an environment between an upper and lower bound. Example with clipped rewards: >>> import numpy as np >>> import gymnasium as gym >>> envs = gym.make_vec("MountainCarContinuous-v0", num_envs=3) >>> envs = ClipReward(envs, 0.0, 2.0) >>> _ = envs.action_space.seed(123) >>> obs, info = envs.reset(seed=123) >>> for _ in range(10): ... obs, rew, term, trunc, info = envs.step(0.5 * np.ones((3, 1))) ... >>> envs.close() >>> rew array([0., 0., 0.]) """ def __init__( self, env: VectorEnv, min_reward: float | np.ndarray | None = None, max_reward: float | np.ndarray | None = None, ): """Constructor for ClipReward wrapper. Args: env: The vector environment to wrap min_reward: The min reward for each step max_reward: the max reward for each step """ super().__init__( env, transform_reward.ClipReward, min_reward=min_reward, max_reward=max_reward, )
ClipReward
python
PyCQA__pylint
doc/data/messages/a/access-member-before-definition/bad.py
{ "start": 0, "end": 243 }
class ____: def __init__(self, fluffiness_level): if self.fluffiness_level > 9000: # [access-member-before-definition] print("It's OVER-FLUFFYYYY ! *crush glasses*") self.fluffiness_level = fluffiness_level
Unicorn
python
numba__numba
numba/pycc/cc.py
{ "start": 9463, "end": 10626 }
class ____(Extension): """ A Numba-specific Extension subclass to LLVM-compile pure Python code to an extension module. """ _cc = None _distutils_monkey_patched = False def _prepare_object_files(self, build_ext): cc = self._cc dir_util.mkpath(os.path.join(build_ext.build_temp, *self.name.split('.')[:-1])) objects, _ = cc._compile_object_files(build_ext.build_temp) # Add generated object files for linking self.extra_objects = objects @classmethod def monkey_patch_distutils(cls): """ Monkey-patch distutils with our own build_ext class knowing about pycc-compiled extensions modules. """ if cls._distutils_monkey_patched: return _orig_build_ext = build_ext.build_ext class _CC_build_ext(_orig_build_ext): def build_extension(self, ext): if isinstance(ext, _CCExtension): ext._prepare_object_files(self) _orig_build_ext.build_extension(self, ext) build_ext.build_ext = _CC_build_ext cls._distutils_monkey_patched = True
_CCExtension
python
realpython__materials
wordcount/tests/realpython/exceptions.py
{ "start": 0, "end": 197 }
class ____(AssertionError): def __init__(self, expected, actual, message=None): self.expected = expected self.actual = actual self.message = message
RealPythonAssertionError
python
scikit-learn__scikit-learn
sklearn/linear_model/_logistic.py
{ "start": 26598, "end": 50992 }
class ____(LinearClassifierMixin, SparseCoefMixin, BaseEstimator): """ Logistic Regression (aka logit, MaxEnt) classifier. This class implements regularized logistic regression using a set of available solvers. **Note that regularization is applied by default**. It can handle both dense and sparse input `X`. Use C-ordered arrays or CSR matrices containing 64-bit floats for optimal performance; any other input format will be converted (and copied). The solvers 'lbfgs', 'newton-cg', 'newton-cholesky' and 'sag' support only L2 regularization with primal formulation, or no regularization. The 'liblinear' solver supports both L1 and L2 regularization (but not both, i.e. elastic-net), with a dual formulation only for the L2 penalty. The Elastic-Net (combination of L1 and L2) regularization is only supported by the 'saga' solver. For :term:`multiclass` problems (whenever `n_classes >= 3`), all solvers except 'liblinear' optimize the (penalized) multinomial loss. 'liblinear' only handles binary classification but can be extended to handle multiclass by using :class:`~sklearn.multiclass.OneVsRestClassifier`. Read more in the :ref:`User Guide <logistic_regression>`. Parameters ---------- penalty : {'l1', 'l2', 'elasticnet', None}, default='l2' Specify the norm of the penalty: - `None`: no penalty is added; - `'l2'`: add a L2 penalty term and it is the default choice; - `'l1'`: add a L1 penalty term; - `'elasticnet'`: both L1 and L2 penalty terms are added. .. warning:: Some penalties may not work with some solvers. See the parameter `solver` below, to know the compatibility between the penalty and solver. .. versionadded:: 0.19 l1 penalty with SAGA solver (allowing 'multinomial' + L1) .. deprecated:: 1.8 `penalty` was deprecated in version 1.8 and will be removed in 1.10. Use `l1_ratio` instead. `l1_ratio=0` for `penalty='l2'`, `l1_ratio=1` for `penalty='l1'` and `l1_ratio` set to any float between 0 and 1 for `'penalty='elasticnet'`. 
C : float, default=1.0 Inverse of regularization strength; must be a positive float. Like in support vector machines, smaller values specify stronger regularization. `C=np.inf` results in unpenalized logistic regression. For a visual example on the effect of tuning the `C` parameter with an L1 penalty, see: :ref:`sphx_glr_auto_examples_linear_model_plot_logistic_path.py`. l1_ratio : float, default=0.0 The Elastic-Net mixing parameter, with `0 <= l1_ratio <= 1`. Setting `l1_ratio=1` gives a pure L1-penalty, setting `l1_ratio=0` a pure L2-penalty. Any value between 0 and 1 gives an Elastic-Net penalty of the form `l1_ratio * L1 + (1 - l1_ratio) * L2`. .. warning:: Certain values of `l1_ratio`, i.e. some penalties, may not work with some solvers. See the parameter `solver` below, to know the compatibility between the penalty and solver. .. versionchanged:: 1.8 Default value changed from None to 0.0. .. deprecated:: 1.8 `None` is deprecated and will be removed in version 1.10. Always use `l1_ratio` to specify the penalty type. dual : bool, default=False Dual (constrained) or primal (regularized, see also :ref:`this equation <regularized-logistic-loss>`) formulation. Dual formulation is only implemented for l2 penalty with liblinear solver. Prefer `dual=False` when n_samples > n_features. tol : float, default=1e-4 Tolerance for stopping criteria. fit_intercept : bool, default=True Specifies if a constant (a.k.a. bias or intercept) should be added to the decision function. intercept_scaling : float, default=1 Useful only when the solver `liblinear` is used and `self.fit_intercept` is set to `True`. In this case, `x` becomes `[x, self.intercept_scaling]`, i.e. a "synthetic" feature with constant value equal to `intercept_scaling` is appended to the instance vector. The intercept becomes ``intercept_scaling * synthetic_feature_weight``. .. note:: The synthetic feature weight is subject to L1 or L2 regularization as all other features. 
To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) `intercept_scaling` has to be increased. class_weight : dict or 'balanced', default=None Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``. Note that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified. .. versionadded:: 0.17 *class_weight='balanced'* random_state : int, RandomState instance, default=None Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the data. See :term:`Glossary <random_state>` for details. solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \ default='lbfgs' Algorithm to use in the optimization problem. Default is 'lbfgs'. To choose a solver, you might want to consider the following aspects: - 'lbfgs' is a good default solver because it works reasonably well for a wide class of problems. - For :term:`multiclass` problems (`n_classes >= 3`), all solvers except 'liblinear' minimize the full multinomial loss, 'liblinear' will raise an error. - 'newton-cholesky' is a good choice for `n_samples` >> `n_features * n_classes`, especially with one-hot encoded categorical features with rare categories. Be aware that the memory usage of this solver has a quadratic dependency on `n_features * n_classes` because it explicitly computes the full Hessian matrix. - For small datasets, 'liblinear' is a good choice, whereas 'sag' and 'saga' are faster for large ones; - 'liblinear' can only handle binary classification by default. To apply a one-versus-rest scheme for the multiclass setting one can wrap it with the :class:`~sklearn.multiclass.OneVsRestClassifier`. .. 
warning:: The choice of the algorithm depends on the penalty chosen (`l1_ratio=0` for L2-penalty, `l1_ratio=1` for L1-penalty and `0 < l1_ratio < 1` for Elastic-Net) and on (multinomial) multiclass support: ================= ======================== ====================== solver l1_ratio multinomial multiclass ================= ======================== ====================== 'lbfgs' l1_ratio=0 yes 'liblinear' l1_ratio=1 or l1_ratio=0 no 'newton-cg' l1_ratio=0 yes 'newton-cholesky' l1_ratio=0 yes 'sag' l1_ratio=0 yes 'saga' 0<=l1_ratio<=1 yes ================= ======================== ====================== .. note:: 'sag' and 'saga' fast convergence is only guaranteed on features with approximately the same scale. You can preprocess the data with a scaler from :mod:`sklearn.preprocessing`. .. seealso:: Refer to the :ref:`User Guide <Logistic_regression>` for more information regarding :class:`LogisticRegression` and more specifically the :ref:`Table <logistic_regression_solvers>` summarizing solver/penalty supports. .. versionadded:: 0.17 Stochastic Average Gradient (SAG) descent solver. Multinomial support in version 0.18. .. versionadded:: 0.19 SAGA solver. .. versionchanged:: 0.22 The default solver changed from 'liblinear' to 'lbfgs' in 0.22. .. versionadded:: 1.2 newton-cholesky solver. Multinomial support in version 1.6. max_iter : int, default=100 Maximum number of iterations taken for the solvers to converge. verbose : int, default=0 For the liblinear and lbfgs solvers set verbose to any positive number for verbosity. warm_start : bool, default=False When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. Useless for liblinear solver. See :term:`the Glossary <warm_start>`. .. versionadded:: 0.17 *warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers. n_jobs : int, default=None Does not have any effect. .. 
deprecated:: 1.8 `n_jobs` is deprecated in version 1.8 and will be removed in 1.10. Attributes ---------- classes_ : ndarray of shape (n_classes, ) A list of class labels known to the classifier. coef_ : ndarray of shape (1, n_features) or (n_classes, n_features) Coefficient of the features in the decision function. `coef_` is of shape (1, n_features) when the given problem is binary. intercept_ : ndarray of shape (1,) or (n_classes,) Intercept (a.k.a. bias) added to the decision function. If `fit_intercept` is set to False, the intercept is set to zero. `intercept_` is of shape (1,) when the given problem is binary. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 n_iter_ : ndarray of shape (1, ) Actual number of iterations for all classes. .. versionchanged:: 0.20 In SciPy <= 1.0.0 the number of lbfgs iterations may exceed ``max_iter``. ``n_iter_`` will now report at most ``max_iter``. See Also -------- SGDClassifier : Incrementally trained logistic regression (when given the parameter ``loss="log_loss"``). LogisticRegressionCV : Logistic regression with built-in cross validation. Notes ----- The underlying C implementation uses a random number generator to select features when fitting the model. It is thus not uncommon, to have slightly different results for the same input data. If that happens, try with a smaller tol parameter. Predict output may not match that of standalone liblinear in certain cases. See :ref:`differences from liblinear <liblinear_differences>` in the narrative documentation. References ---------- L-BFGS-B -- Software for Large-scale Bound-constrained Optimization Ciyou Zhu, Richard Byrd, Jorge Nocedal and Jose Luis Morales. 
http://users.iems.northwestern.edu/~nocedal/lbfgsb.html LIBLINEAR -- A Library for Large Linear Classification https://www.csie.ntu.edu.tw/~cjlin/liblinear/ SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach Minimizing Finite Sums with the Stochastic Average Gradient https://hal.inria.fr/hal-00860051/document SAGA -- Defazio, A., Bach F. & Lacoste-Julien S. (2014). :arxiv:`"SAGA: A Fast Incremental Gradient Method With Support for Non-Strongly Convex Composite Objectives" <1407.0202>` Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent methods for logistic regression and maximum entropy models. Machine Learning 85(1-2):41-75. https://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf Examples -------- >>> from sklearn.datasets import load_iris >>> from sklearn.linear_model import LogisticRegression >>> X, y = load_iris(return_X_y=True) >>> clf = LogisticRegression(random_state=0).fit(X, y) >>> clf.predict(X[:2, :]) array([0, 0]) >>> clf.predict_proba(X[:2, :]) array([[9.82e-01, 1.82e-02, 1.44e-08], [9.72e-01, 2.82e-02, 3.02e-08]]) >>> clf.score(X, y) 0.97 For a comparison of the LogisticRegression with other classifiers see: :ref:`sphx_glr_auto_examples_classification_plot_classification_probability.py`. 
""" _parameter_constraints: dict = { "penalty": [ StrOptions({"l1", "l2", "elasticnet"}), None, Hidden(StrOptions({"deprecated"})), ], "C": [Interval(Real, 0, None, closed="right")], "l1_ratio": [Interval(Real, 0, 1, closed="both"), None], "dual": ["boolean"], "tol": [Interval(Real, 0, None, closed="left")], "fit_intercept": ["boolean"], "intercept_scaling": [Interval(Real, 0, None, closed="neither")], "class_weight": [dict, StrOptions({"balanced"}), None], "random_state": ["random_state"], "solver": [ StrOptions( {"lbfgs", "liblinear", "newton-cg", "newton-cholesky", "sag", "saga"} ) ], "max_iter": [Interval(Integral, 0, None, closed="left")], "verbose": ["verbose"], "warm_start": ["boolean"], "n_jobs": [None, Integral], } def __init__( self, penalty="deprecated", *, C=1.0, l1_ratio=0.0, dual=False, tol=1e-4, fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=None, solver="lbfgs", max_iter=100, verbose=0, warm_start=False, n_jobs=None, ): self.penalty = penalty self.C = C self.l1_ratio = l1_ratio self.dual = dual self.tol = tol self.fit_intercept = fit_intercept self.intercept_scaling = intercept_scaling self.class_weight = class_weight self.random_state = random_state self.solver = solver self.max_iter = max_iter self.verbose = verbose self.warm_start = warm_start self.n_jobs = n_jobs @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y, sample_weight=None): """ Fit the model according to the given training data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target vector relative to X. sample_weight : array-like of shape (n_samples,) default=None Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. .. versionadded:: 0.17 *sample_weight* support to LogisticRegression. 
Returns ------- self Fitted estimator. Notes ----- The SAGA solver supports both float64 and float32 bit arrays. """ if self.penalty == "deprecated": if self.l1_ratio == 0 or self.l1_ratio is None: penalty = "l2" if self.l1_ratio is None: warnings.warn( ( "'l1_ratio=None' was deprecated in version 1.8 and will " "trigger an error in 1.10. Use 0<=l1_ratio<=1 instead." ), FutureWarning, ) elif self.l1_ratio == 1: penalty = "l1" else: penalty = "elasticnet" if self.C == np.inf: penalty = None else: penalty = self.penalty warnings.warn( ( "'penalty' was deprecated in version 1.8 and will be removed in" " 1.10. To avoid this warning, leave 'penalty' set to its default" " value and use 'l1_ratio' or 'C' instead." " Use l1_ratio=0 instead of penalty='l2'," " l1_ratio=1 instead of penalty='l1', and " "C=np.inf instead of penalty=None." ), FutureWarning, ) solver = _check_solver(self.solver, penalty, self.dual) if penalty != "elasticnet" and ( self.l1_ratio is not None and 0 < self.l1_ratio < 1 ): warnings.warn( "l1_ratio parameter is only used when penalty is " "'elasticnet'. Got " "(penalty={})".format(penalty) ) if (self.penalty == "l2" and self.l1_ratio != 0) or ( self.penalty == "l1" and self.l1_ratio != 1 ): warnings.warn( f"Inconsistent values: penalty={self.penalty} with " f"l1_ratio={self.l1_ratio}. penalty is deprecated. Please use " f"l1_ratio only." ) if penalty == "elasticnet" and self.l1_ratio is None: raise ValueError("l1_ratio must be specified when penalty is elasticnet.") if penalty is None: if self.C != 1.0: # default values warnings.warn( "Setting penalty=None will ignore the C and l1_ratio parameters" ) # Note that check for l1_ratio is done right above C_ = np.inf penalty = "l2" else: C_ = self.C msg = ( "'n_jobs' has no effect since 1.8 and will be removed in 1.10. " f"You provided 'n_jobs={self.n_jobs}', please leave it unspecified." 
) if self.n_jobs is not None: warnings.warn(msg, category=FutureWarning) if solver == "lbfgs": _dtype = np.float64 else: _dtype = [np.float64, np.float32] X, y = validate_data( self, X, y, accept_sparse="csr", dtype=_dtype, order="C", accept_large_sparse=solver not in ["liblinear", "sag", "saga"], ) n_features = X.shape[1] check_classification_targets(y) self.classes_ = np.unique(y) n_classes = len(self.classes_) is_binary = n_classes == 2 if solver == "liblinear": if not is_binary: raise ValueError( "The 'liblinear' solver does not support multiclass classification" " (n_classes >= 3). Either use another solver or wrap the " "estimator in a OneVsRestClassifier to keep applying a " "one-versus-rest scheme." ) if np.max(X) > 1e30: raise ValueError( "Using the 'liblinear' solver while X contains a maximum " "value > 1e30 results in a frozen fit. Please choose another " "solver or rescale the input X." ) self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear( X, y, self.C, self.fit_intercept, self.intercept_scaling, self.class_weight, penalty, self.dual, self.verbose, self.max_iter, self.tol, self.random_state, sample_weight=sample_weight, ) return self if solver in ["sag", "saga"]: max_squared_sum = row_norms(X, squared=True).max() else: max_squared_sum = None if n_classes < 2: raise ValueError( "This solver needs samples of at least 2 classes" " in the data, but the data contains only one" " class: %r" % self.classes_[0] ) if self.warm_start: warm_start_coef = getattr(self, "coef_", None) else: warm_start_coef = None if warm_start_coef is not None and self.fit_intercept: warm_start_coef = np.append( warm_start_coef, self.intercept_[:, np.newaxis], axis=1 ) # TODO: enable multi-threading if benchmarks show a positive effect, # see https://github.com/scikit-learn/scikit-learn/issues/32162 n_threads = 1 coefs, _, n_iter = _logistic_regression_path( X, y, classes=self.classes_, Cs=[C_], l1_ratio=self.l1_ratio, fit_intercept=self.fit_intercept, tol=self.tol, 
verbose=self.verbose, solver=solver, max_iter=self.max_iter, class_weight=self.class_weight, check_input=False, random_state=self.random_state, coef=warm_start_coef, penalty=penalty, max_squared_sum=max_squared_sum, sample_weight=sample_weight, n_threads=n_threads, ) self.n_iter_ = np.asarray(n_iter, dtype=np.int32) self.coef_ = coefs[0] if self.fit_intercept: if is_binary: self.intercept_ = self.coef_[-1:] self.coef_ = self.coef_[:-1][None, :] else: self.intercept_ = self.coef_[:, -1] self.coef_ = self.coef_[:, :-1] else: if is_binary: self.intercept_ = np.zeros(1, dtype=X.dtype) self.coef_ = self.coef_[None, :] else: self.intercept_ = np.zeros(n_classes, dtype=X.dtype) return self def predict_proba(self, X): """ Probability estimates. The returned estimates for all classes are ordered by the label of classes. For a multiclass / multinomial problem the softmax function is used to find the predicted probability of each class. Parameters ---------- X : array-like of shape (n_samples, n_features) Vector to be scored, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- T : array-like of shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in ``self.classes_``. """ check_is_fitted(self) is_binary = self.classes_.size <= 2 if is_binary: return super()._predict_proba_lr(X) else: decision_2d = self.decision_function(X) return softmax(decision_2d, copy=False) def predict_log_proba(self, X): """ Predict logarithm of probability estimates. The returned estimates for all classes are ordered by the label of classes. Parameters ---------- X : array-like of shape (n_samples, n_features) Vector to be scored, where `n_samples` is the number of samples and `n_features` is the number of features. 
Returns ------- T : array-like of shape (n_samples, n_classes) Returns the log-probability of the sample for each class in the model, where classes are ordered as they are in ``self.classes_``. """ return np.log(self.predict_proba(X)) def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.sparse = True if self.solver == "liblinear": tags.classifier_tags.multi_class = False return tags
LogisticRegression
python
openai__openai-python
src/openai/types/realtime/realtime_tools_config_union_param.py
{ "start": 2450, "end": 4595 }
class ____(TypedDict, total=False): server_label: Required[str] """A label for this MCP server, used to identify it in tool calls.""" type: Required[Literal["mcp"]] """The type of the MCP tool. Always `mcp`.""" allowed_tools: Optional[McpAllowedTools] """List of allowed tool names or a filter object.""" authorization: str """ An OAuth access token that can be used with a remote MCP server, either with a custom MCP server URL or a service connector. Your application must handle the OAuth authorization flow and provide the token here. """ connector_id: Literal[ "connector_dropbox", "connector_gmail", "connector_googlecalendar", "connector_googledrive", "connector_microsoftteams", "connector_outlookcalendar", "connector_outlookemail", "connector_sharepoint", ] """Identifier for service connectors, like those available in ChatGPT. One of `server_url` or `connector_id` must be provided. Learn more about service connectors [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors). Currently supported `connector_id` values are: - Dropbox: `connector_dropbox` - Gmail: `connector_gmail` - Google Calendar: `connector_googlecalendar` - Google Drive: `connector_googledrive` - Microsoft Teams: `connector_microsoftteams` - Outlook Calendar: `connector_outlookcalendar` - Outlook Email: `connector_outlookemail` - SharePoint: `connector_sharepoint` """ headers: Optional[Dict[str, str]] """Optional HTTP headers to send to the MCP server. Use for authentication or other purposes. """ require_approval: Optional[McpRequireApproval] """Specify which of the MCP server's tools require approval.""" server_description: str """Optional description of the MCP server, used to provide more context.""" server_url: str """The URL for the MCP server. One of `server_url` or `connector_id` must be provided. """ RealtimeToolsConfigUnionParam: TypeAlias = Union[RealtimeFunctionToolParam, Mcp]
Mcp
python
apache__airflow
airflow-core/src/airflow/exceptions.py
{ "start": 5786, "end": 5894 }
class ____(AirflowNotFoundException): """Raise when a Pool is not available in the system."""
PoolNotFound
python
django__django
django/contrib/gis/gdal/field.py
{ "start": 6410, "end": 6886 }
class ____(Field): pass # Class mapping dictionary for OFT Types and reverse mapping. OGRFieldTypes = { 0: OFTInteger, 1: OFTIntegerList, 2: OFTReal, 3: OFTRealList, 4: OFTString, 5: OFTStringList, 6: OFTWideString, 7: OFTWideStringList, 8: OFTBinary, 9: OFTDate, 10: OFTTime, 11: OFTDateTime, 12: OFTInteger64, 13: OFTInteger64List, } ROGRFieldTypes = {cls: num for num, cls in OGRFieldTypes.items()}
OFTInteger64List
python
huggingface__transformers
src/transformers/models/dia/modeling_dia.py
{ "start": 5345, "end": 11684 }
class ____(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: DiaConfig, device=None): super().__init__() self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_type = self.config.rope_parameters["rope_type"] rope_init_fn: Callable = self.compute_default_rope_parameters if self.rope_type != "default": rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = inv_freq @staticmethod def compute_default_rope_parameters( config: Optional[DiaConfig] = None, device: Optional["torch.device"] = None, seq_len: Optional[int] = None, ) -> tuple["torch.Tensor", float]: """ Computes the inverse frequencies according to the original RoPE implementation Args: config ([`~transformers.PreTrainedConfig`]): The model configuration. device (`torch.device`): The device to use for initialization of the inverse frequencies. seq_len (`int`, *optional*): The current sequence length. Unused for this type of RoPE. Returns: Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE). """ base = config.rope_parameters["rope_theta"] dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads attention_factor = 1.0 # Unused in this type of RoPE # Compute the inverse frequencies inv_freq = 1.0 / ( base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim) ) return inv_freq, attention_factor @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. 
dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) @use_kernel_func_from_hub("rotary_pos_emb") def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. 
Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs: Unpack[TransformersKwargs], ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights
DiaRotaryEmbedding
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pylint/non_slot_assignment.py
{ "start": 924, "end": 1121 }
class ____(StudentD): def __init__(self, name, middle_name): self.name = name self.middle_name = middle_name # OK self.setup() def setup(self): pass
StudentE
python
tensorflow__tensorflow
tensorflow/python/tpu/tpu_embedding_v3_utils.py
{ "start": 6168, "end": 10270 }
class ____(trackable_base.Trackable): """Trackable for stacked tables generated from sparse core.""" def __init__(self, stacked_layouts, table_to_config): self.vars = {} self._stacked_layouts = stacked_layouts for table_layout in stacked_layouts: variable_shape = tuple(table_layout.unsharded_shape) self.vars[table_layout.table_name] = tf_variables.Variable( name=table_layout.table_name, initial_value=functools.partial( table_to_config[table_layout.table_name].initializer, variable_shape, dtype=dtypes.float32, ), shape=variable_shape, dtype=dtypes.float32, ) # TODO(b/312743130): This is a workaround. During checkpoint restoration # optimizer expects the trackable to provide a `_unique_id` or equivalent. # Remove this when the bug is fixed. @property def _unique_id(self): return self.vars[self._stacked_layouts[0].table_name]._unique_id def _serialize_to_tensors(self) -> Any: return { # We need to export some variable here for restore to pick # the checkpoint key the actual value is not important so 0 works trackable_base.VARIABLE_VALUE_KEY: tf_constant( 0.0, dtype=dtypes.float32 ), } def _restore_from_tensors(self, restored_tensors: Dict[str, tensor.Tensor]): def fn(restored_tensors): value_from_checkpoint = restored_tensors[ trackable_base.VARIABLE_VALUE_KEY ] # Do unsharding to get the individual tables from the stacked table in # checkpoint for layout in self._stacked_layouts: variable_shape = ( layout.unsharded_shape[0], layout.unsharded_shape[1], ) t_part = unshuffle_from_sc_to_cpu( t=value_from_checkpoint, num_sparse_cores=layout.num_sparse_cores, offset_in_shard=layout.sparse_core_shard_row_offset, size_in_shard=( layout.unsharded_padded_shape[0] // layout.num_sparse_cores ), shard_rotation=layout.sparse_core_shard_rotation, ) t_part = remove_padding_from_sc(t_part, variable_shape) self.vars[layout.table_name].assign(t_part) return fn(restored_tensors) def get_var(self, name: str) -> tf_variables.Variable: return self.vars[name] def get_vars(self) -> Dict[str, 
tf_variables.Variable]: return self.vars def __repr__(self): return "SparseCoreStackedTableTrackable({})".format(self.vars.keys()) def shard_table( num_sparse_cores: int, table: tensor.Tensor, ) -> tensor.Tensor: """Convert a table to the internal layout. Args: num_sparse_cores: The total number of sparse cores. table: The full table, unsharded. Returns: A tensor containing the sharded value for this table. """ assert table.shape[0] % num_sparse_cores == 0 # Do the sparse core rotation: tmp = array_ops.reshape( table, [-1, num_sparse_cores, table.shape[1]], ) # The mod sharding across sparse cores. tmp = array_ops.transpose(tmp, [1, 0, 2]) return array_ops.reshape(tmp, [-1, table.shape[1]]) def shard_initializer(strategy, initializer) -> tensor.Tensor: """Wraps an initializer to convert a table to the internal layout.""" num_devices = strategy.extended._tpu_devices.size # pylint: disable=protected-access num_sc_per_chip = ( strategy.extended.tpu_hardware_feature.num_embedding_devices_per_chip ) num_scs = num_devices * num_sc_per_chip @functools.wraps(initializer) def wrapper(shape, dtype, shard_info=None): # Initializes the whole table. table = initializer(shape, dtype) if shard_info is None: return table # Convert the table to the internal layout. table = shard_table(num_scs, table) # Pull out the shard of interest. return table[ shard_info.offset[0] : shard_info.offset[0] + shard_info.shape[0], shard_info.offset[1] : shard_info.offset[1] + shard_info.shape[1], ] return wrapper
SparseCoreStackedTableTrackable
python
openai__openai-python
src/openai/types/beta/thread_update_params.py
{ "start": 387, "end": 1174 }
class ____(TypedDict, total=False): metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. """ tool_resources: Optional[ToolResources] """ A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. """
ThreadUpdateParams
python
mahmoud__boltons
boltons/cacheutils.py
{ "start": 22025, "end": 22954 }
class ____: """The ``cachedproperty`` is used similar to :class:`property`, except that the wrapped method is only called once. This is commonly used to implement lazy attributes. After the property has been accessed, the value is stored on the instance itself, using the same name as the cachedproperty. This allows the cache to be cleared with :func:`delattr`, or through manipulating the object's ``__dict__``. """ def __init__(self, func): self.__doc__ = getattr(func, '__doc__') self.__isabstractmethod__ = getattr(func, '__isabstractmethod__', False) self.func = func def __get__(self, obj, objtype=None): if obj is None: return self value = obj.__dict__[self.func.__name__] = self.func(obj) return value def __repr__(self): cn = self.__class__.__name__ return f'<{cn} func={self.func}>'
cachedproperty
python
pytorch__pytorch
test/distributed/_composable/fsdp/test_fully_shard_overlap.py
{ "start": 774, "end": 10297 }
class ____(FSDPTest): """ NOTE: Testing stream overlap in PyTorch CI is tricky. One approach is to use CUDA sleeps to emulate kernels in each stream; however, ``torch.cuda._sleep`` requires inputs in units of cycles. The ``get_cycles_per_ms`` function to convert from ms to cycles is computed once and cached thereafter, which means that if there is variation later, the cached value may not be accurate. This leads to flakiness in CI. To address this, we relax the tests as simple sanity checks that the overlapped times are less than a non-overlapped baseline, but we do not test that the overlapped time is less than a precisely calculated time. """ @property def world_size(self) -> int: return min(2, torch.get_device_module(device_type).device_count()) @skip_if_rocm_arch_multiprocess(MI200_ARCH) @skip_if_lt_x_gpu(2) @unittest.skipIf(TEST_HPU, "Sleep is not supported on HPU") def test_fully_shard_training_overlap(self): torch.manual_seed(42) # Use non-trivial comm. time but still shorter than compute time dim, num_linears, compute_sleep_ms, comm_sleep_ms = (4, 3, 25, 10) model = nn.Sequential( *[LinearWithSleep(dim, compute_sleep_ms) for _ in range(num_linears)] ) ref_model = copy.deepcopy(model).to(device_type) for lin in model: assert len(list(lin.parameters())) == 1, "Expects only one weight" fully_shard(lin, reshard_after_forward=True) fully_shard(model, reshard_after_forward=True) orig_all_gather_into_tensor = dist.all_gather_into_tensor orig_reduce_scatter_tensor = dist.reduce_scatter_tensor comm_stream = torch.get_device_module(device_type).Stream() def delay_collective(): # Share a stream so that all-gather and reduce-scatter block each # other like in `ProcessGroupNCCL` comm_stream.wait_stream( torch.get_device_module(device_type).current_stream() ) with torch.get_device_module(device_type).stream(comm_stream): torch.get_device_module(device_type)._sleep( int(comm_sleep_ms * get_cycles_per_ms()) ) 
torch.get_device_module(device_type).current_stream().wait_stream( comm_stream ) def delayed_all_gather(*args, **kwargs): delay_collective() return orig_all_gather_into_tensor(*args, **kwargs) def delayed_reduce_scatter(*args, **kwargs): delay_collective() return orig_reduce_scatter_tensor(*args, **kwargs) inp = torch.randn((2, dim), device=device_type.type) loss = model(inp).sum() # warmup CUDA and allocator loss.backward() def ref_fwd(): with patch_all_gather(delayed_all_gather): # Run dummy all-gathers per weight (which is one FSDP group) for lin in ref_model: dummy_ag_output = torch.empty_like(lin.weight) dummy_ag_input = torch.chunk(dummy_ag_output, self.world_size)[ self.rank ] dist.all_gather_into_tensor(dummy_ag_output, dummy_ag_input) return ref_model(inp) def fwd(): with patch_all_gather(delayed_all_gather): model(inp) ref_fwd_time = self._time_fn(ref_fwd) fwd_time = self._time_fn(fwd) # Forward: only 1st all-gather is exposed # NOTE: Do not enforce the expected forward time due to flakiness in CI # expected_fwd_time = comm_sleep_ms + num_linears * compute_sleep_ms + buffer_ms self.assertLessEqual(fwd_time, ref_fwd_time) def ref_fwd_bwd(): with patch_all_gather(delayed_all_gather): # Run dummy all-gathers per weight (which is one FSDP group) for lin in ref_model: dummy_ag_output = torch.empty_like(lin.weight) dummy_ag_input = torch.chunk(dummy_ag_output, self.world_size)[ self.rank ] dist.all_gather_into_tensor(dummy_ag_output, dummy_ag_input) loss = ref_model(inp).sum() # Run dummy all-gathers per weight again since we are # resharding after forward for lin in ref_model: dummy_ag_output = torch.empty_like(lin.weight) dummy_ag_input = torch.chunk(dummy_ag_output, self.world_size)[ self.rank ] dist.all_gather_into_tensor(dummy_ag_output, dummy_ag_input) loss.backward() # Run dummy reduce-scatters per weight for lin in ref_model: dummy_rs_input = torch.empty_like(lin.weight) dummy_rs_output = torch.chunk(dummy_rs_input, self.world_size)[ self.rank ] 
dist.reduce_scatter_tensor(dummy_rs_output, dummy_rs_input) def fwd_bwd(): with ( patch_all_gather(delayed_all_gather), patch_reduce_scatter(delayed_reduce_scatter), ): loss = model(inp).sum() loss.backward() ref_fwd_bwd_time = self._time_fn(ref_fwd_bwd) fwd_bwd_time = self._time_fn(fwd_bwd) # Backward: only 1st all-gather and last reduce-scatter are exposed; # double the backward compute since computing two gradients per layer # NOTE: Do not enforce the expected forward-backward time due to # flakiness in CI # expected_bwd_time = ( # comm_sleep_ms * 2 + num_linears * 2 * compute_sleep_ms + buffer_ms * 2 # ) self.assertLessEqual(fwd_bwd_time, ref_fwd_bwd_time) @skip_if_lt_x_gpu(2) @unittest.skipIf(TEST_HPU, "Sleep is not supported on HPU") def test_fully_shard_post_optim_event_overlap(self): torch.manual_seed(42) # Use non-trivial comm. time but still shorter than compute time dim, compute_sleep_ms, comm_sleep_ms = (4, 25, 10) # Define the model to have a high-compute linear followed by a # low-compute linear, where only the low-compute linear uses FSDP model = nn.Sequential( LinearWithSleep(dim, compute_sleep_ms), nn.Linear(dim, dim) ).to(device_type) fully_shard(model[1], reshard_after_forward=False) optim = torch.optim.AdamW(model.parameters(), lr=1e-2) orig_all_gather_into_tensor = dist.all_gather_into_tensor def delayed_all_gather(*args, **kwargs): torch.get_device_module(device_type)._sleep( int(comm_sleep_ms * get_cycles_per_ms()) ) return orig_all_gather_into_tensor(*args, **kwargs) inp = torch.randn((2, dim), device=device_type) def run_train_steps(num_iters: int, use_post_optim_event: bool): for _ in range(num_iters): optim.zero_grad() with patch_all_gather(delayed_all_gather): loss = model(inp).sum() loss.backward() with implicit_replication(): optim.step() if use_post_optim_event: post_optim_event = ( torch.get_device_module(device_type) .current_stream() .record_event() ) model[1].set_post_optim_event(post_optim_event) run_train_steps(1, False) # 
warmup CUDA and allocator num_iters = 5 baseline_time = self._time_fn( functools.partial(run_train_steps, num_iters, False) ) test_time = self._time_fn(functools.partial(run_train_steps, num_iters, True)) buffer_ms = 4 # CPU delays and copies # Baseline: FSDP all-gather is exposed since the FSDP module waits for # the current stream and hence the high-compute linear self.assertLessEqual( baseline_time, num_iters * (3 * compute_sleep_ms + comm_sleep_ms + buffer_ms), ) # Test: FSDP all-gather is overlapped with the high-compute linear # since the FSDP module only waits for the post-optim event (except on # the 1st iteration when no event has been recorded) expected_test_time = ( num_iters * (3 * compute_sleep_ms + buffer_ms) + comm_sleep_ms ) self.assertLessEqual(test_time, expected_test_time) # Since `get_cycles_per_ms` uses lru cache, there may be some variance # between the initially determined cycles vs. the current cycles per # ms, so we relax the baseline check to just that it is greater than # the test time rather than the expected test time self.assertGreater(baseline_time, test_time) def _time_fn(self, fn: Callable): start_event = device_module.Event(enable_timing=True) end_event = device_module.Event(enable_timing=True) dist.barrier() device_module.synchronize() start_event.record() fn() end_event.record() device_module.synchronize() elapsed_time = start_event.elapsed_time(end_event) return elapsed_time
TestFullyShardOverlap
python
getsentry__sentry
tests/sentry/utils/test_auth.py
{ "start": 1903, "end": 5043 }
class ____(TestCase): def _make_request(self, next=None): request = HttpRequest() request.META["SERVER_NAME"] = "testserver" request.META["SERVER_PORT"] = "80" request.session = SessionBase() request.user = self.user if next: request.session["_next"] = next return request def test_schema_uses_default(self) -> None: result = get_login_redirect(self._make_request("http://example.com")) assert result == reverse("sentry-login") result = get_login_redirect(self._make_request("ftp://testserver")) assert result == reverse("sentry-login") def test_next(self) -> None: result = get_login_redirect(self._make_request("http://testserver/foobar/")) assert result == "http://testserver/foobar/" result = get_login_redirect(self._make_request("ftp://testserver/foobar/")) assert result == reverse("sentry-login") request = self._make_request("/foobar/") request.subdomain = "orgslug" result = get_login_redirect(request) assert result == "http://orgslug.testserver/foobar/" request = self._make_request("http://testserver/foobar/") request.subdomain = "orgslug" result = get_login_redirect(request) assert result == "http://testserver/foobar/" request = self._make_request("ftp://testserver/foobar/") request.subdomain = "orgslug" result = get_login_redirect(request) assert result == f"http://orgslug.testserver{reverse('sentry-login')}" def test_after_2fa(self) -> None: request = self._make_request() request.session["_after_2fa"] = "http://testserver/foobar/" result = get_login_redirect(request) assert result == "http://testserver/foobar/" request = self._make_request() request.subdomain = "orgslug" request.session["_after_2fa"] = "/foobar/" result = get_login_redirect(request) assert result == "http://orgslug.testserver/foobar/" def test_pending_2fa(self) -> None: request = self._make_request() request.session["_pending_2fa"] = [1234, 1234, 1234] result = get_login_redirect(request) assert result == reverse("sentry-2fa-dialog") request = self._make_request() request.subdomain = "orgslug" 
request.session["_pending_2fa"] = [1234, 1234, 1234] result = get_login_redirect(request) assert result == f"http://orgslug.testserver{reverse('sentry-2fa-dialog')}" def test_login_uses_default(self) -> None: result = get_login_redirect(self._make_request(reverse("sentry-login"))) assert result == reverse("sentry-login") def test_no_value_uses_default(self) -> None: result = get_login_redirect(self._make_request()) assert result == reverse("sentry-login") request = self._make_request() request.subdomain = "orgslug" result = get_login_redirect(request) assert result == f"http://orgslug.testserver{reverse('sentry-login')}" @control_silo_test
GetLoginRedirectTest
python
PrefectHQ__prefect
src/prefect/server/services/base.py
{ "start": 1858, "end": 4510 }
class ____(ABC): name: str logger: Logger @classmethod @abstractmethod def service_settings(cls) -> ServicesBaseSetting: """The Prefect setting that controls whether the service is enabled""" ... @classmethod def environment_variable_name(cls) -> str: return canonical_environment_prefix(cls.service_settings()) + "ENABLED" @classmethod def enabled(cls) -> bool: """Whether the service is enabled""" return cls.service_settings().enabled @classmethod def all_services(cls) -> Sequence[type[Self]]: """Get list of all service classes""" discovered: list[type[Self]] = [] for module in _known_service_modules(): for _, obj in inspect.getmembers(module): if ( inspect.isclass(obj) and issubclass(obj, cls) and not inspect.isabstract(obj) ): discovered.append(obj) return discovered @classmethod def enabled_services(cls) -> list[type[Self]]: """Get list of enabled service classes""" return [svc for svc in cls.all_services() if svc.enabled()] @classmethod @asynccontextmanager async def running(cls) -> AsyncGenerator[None, None]: """A context manager that runs enabled services on entry and stops them on exit.""" service_tasks: dict[Service, asyncio.Task[None]] = {} for service_class in cls.enabled_services(): service = service_class() service_tasks[service] = asyncio.create_task(service.start()) try: yield finally: await asyncio.gather(*[service.stop() for service in service_tasks]) await asyncio.gather(*service_tasks.values(), return_exceptions=True) @classmethod async def run_services(cls) -> NoReturn: """Run enabled services until cancelled.""" async with cls.running(): heat_death_of_the_universe = asyncio.get_running_loop().create_future() try: await heat_death_of_the_universe except asyncio.CancelledError: logger.info("Received cancellation, stopping services...") @abstractmethod async def start(self) -> NoReturn: """Start running the service, which may run indefinitely""" ... @abstractmethod async def stop(self) -> None: """Stop the service""" ... 
def __init__(self): self.name = self.__class__.__name__ self.logger = get_logger(f"server.services.{self.name.lower()}")
Service
python
apache__airflow
providers/google/tests/unit/google/cloud/hooks/test_gdm.py
{ "start": 1146, "end": 3980 }
class ____: def setup_method(self): with mock.patch( "airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__", new=mock_init, ): self.gdm_hook = GoogleDeploymentManagerHook(gcp_conn_id="test") @mock.patch("airflow.providers.google.cloud.hooks.gdm.GoogleDeploymentManagerHook.get_conn") def test_list_deployments(self, mock_get_conn): response1 = {"deployments": [{"id": "deployment1", "name": "test-deploy1"}], "pageToken": None} response2 = {"deployments": [{"id": "deployment2", "name": "test-deploy2"}], "pageToken": None} mock_get_conn.return_value.deployments.return_value.list.return_value.execute.return_value = response1 request_mock = mock.MagicMock() request_mock.execute.return_value = response2 mock_get_conn.return_value.deployments.return_value.list_next.side_effect = [ request_mock, None, ] deployments = self.gdm_hook.list_deployments( project_id=TEST_PROJECT, deployment_filter="filter", order_by="name" ) mock_get_conn.assert_called_once_with() mock_get_conn.return_value.deployments.return_value.list.assert_called_once_with( project=TEST_PROJECT, filter="filter", orderBy="name", ) assert mock_get_conn.return_value.deployments.return_value.list_next.call_count == 2 assert deployments == [ {"id": "deployment1", "name": "test-deploy1"}, {"id": "deployment2", "name": "test-deploy2"}, ] @mock.patch("airflow.providers.google.cloud.hooks.gdm.GoogleDeploymentManagerHook.get_conn") def test_delete_deployment(self, mock_get_conn): self.gdm_hook.delete_deployment(project_id=TEST_PROJECT, deployment=TEST_DEPLOYMENT) mock_get_conn.assert_called_once_with() mock_get_conn.return_value.deployments().delete.assert_called_once_with( project=TEST_PROJECT, deployment=TEST_DEPLOYMENT, deletePolicy=None ) @mock.patch("airflow.providers.google.cloud.hooks.gdm.GoogleDeploymentManagerHook.get_conn") def test_delete_deployment_delete_fails(self, mock_get_conn): resp = {"error": {"errors": [{"message": "error deleting things.", "domain": "global"}]}} 
mock_get_conn.return_value.deployments.return_value.delete.return_value.execute.return_value = resp with pytest.raises(AirflowException): self.gdm_hook.delete_deployment(project_id=TEST_PROJECT, deployment=TEST_DEPLOYMENT) mock_get_conn.assert_called_once_with() mock_get_conn.return_value.deployments().delete.assert_called_once_with( project=TEST_PROJECT, deployment=TEST_DEPLOYMENT, deletePolicy=None )
TestDeploymentManagerHook
python
apache__airflow
providers/google/tests/unit/google/cloud/sensors/test_gcs.py
{ "start": 2071, "end": 7705 }
class ____: @mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook") @mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSObjectExistenceSensor.defer") def test_gcs_object_existence_sensor_return_value(self, mock_defer, mock_hook): task = GCSObjectExistenceSensor( task_id="task-id", bucket=TEST_BUCKET, object=TEST_OBJECT, google_cloud_conn_id=TEST_GCP_CONN_ID, deferrable=True, ) mock_hook.return_value.list.return_value = True return_value = task.execute(mock.MagicMock()) assert return_value @mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook") def test_should_pass_argument_to_hook(self, mock_hook): task = GCSObjectExistenceSensor( task_id="task-id", bucket=TEST_BUCKET, object=TEST_OBJECT, use_glob=False, google_cloud_conn_id=TEST_GCP_CONN_ID, impersonation_chain=TEST_IMPERSONATION_CHAIN, ) mock_hook.return_value.exists.return_value = True result = task.poke(mock.MagicMock()) assert result is True mock_hook.assert_called_once_with( gcp_conn_id=TEST_GCP_CONN_ID, impersonation_chain=TEST_IMPERSONATION_CHAIN, ) mock_hook.return_value.exists.assert_called_once_with(TEST_BUCKET, TEST_OBJECT, DEFAULT_RETRY) @mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook") def test_should_pass_argument_to_hook_using_glob(self, mock_hook): task = GCSObjectExistenceSensor( task_id="task-id", bucket=TEST_BUCKET, object=TEST_OBJECT, use_glob=True, google_cloud_conn_id=TEST_GCP_CONN_ID, impersonation_chain=TEST_IMPERSONATION_CHAIN, ) mock_hook.return_value.list.return_value = [mock.MagicMock()] result = task.poke(mock.MagicMock()) assert result is True mock_hook.assert_called_once_with( gcp_conn_id=TEST_GCP_CONN_ID, impersonation_chain=TEST_IMPERSONATION_CHAIN, ) mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, match_glob=TEST_OBJECT) @mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook") @mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSObjectExistenceSensor.defer") def 
test_gcs_object_existence_sensor_finish_before_deferred(self, mock_defer, mock_hook): task = GCSObjectExistenceSensor( task_id="task-id", bucket=TEST_BUCKET, object=TEST_OBJECT, google_cloud_conn_id=TEST_GCP_CONN_ID, deferrable=True, ) mock_hook.return_value.exists.return_value = True task.execute(mock.MagicMock()) assert not mock_defer.called @mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook") def test_gcs_object_existence_sensor_deferred(self, mock_hook): """ Asserts that a task is deferred and a GCSBlobTrigger will be fired when the GCSObjectExistenceSensor is executed and deferrable is set to True. """ task = GCSObjectExistenceSensor( task_id="task-id", bucket=TEST_BUCKET, object=TEST_OBJECT, google_cloud_conn_id=TEST_GCP_CONN_ID, deferrable=True, ) mock_hook.return_value.exists.return_value = False with pytest.raises(TaskDeferred) as exc: task.execute({}) assert isinstance(exc.value.trigger, GCSBlobTrigger), "Trigger is not a GCSBlobTrigger" def test_gcs_object_existence_sensor_deferred_execute_failure(self): """Tests that an AirflowException is raised in case of error event when deferrable is set to True""" task = GCSObjectExistenceSensor( task_id="task-id", bucket=TEST_BUCKET, object=TEST_OBJECT, google_cloud_conn_id=TEST_GCP_CONN_ID, deferrable=True, ) with pytest.raises(AirflowException): task.execute_complete(context=None, event={"status": "error", "message": "test failure message"}) def test_gcs_object_existence_sensor_execute_complete(self): """Asserts that logging occurs as expected when deferrable is set to True""" task = GCSObjectExistenceSensor( task_id="task-id", bucket=TEST_BUCKET, object=TEST_OBJECT, google_cloud_conn_id=TEST_GCP_CONN_ID, deferrable=True, ) with mock.patch.object(task.log, "info") as mock_log_info: task.execute_complete(context=None, event={"status": "success", "message": "Job completed"}) mock_log_info.assert_called_with("File %s was found in bucket %s.", TEST_OBJECT, TEST_BUCKET) def 
test_gcs_object_existence_sensor_execute_complete_return_value(self): """Asserts that logging occurs as expected when deferrable is set to True""" task = GCSObjectExistenceSensor( task_id="task-id", bucket=TEST_BUCKET, object=TEST_OBJECT, google_cloud_conn_id=TEST_GCP_CONN_ID, deferrable=True, ) with mock.patch.object(task.log, "info") as mock_log_info: return_value = task.execute_complete( context=None, event={"status": "success", "message": "Job completed"} ) mock_log_info.assert_called_with("File %s was found in bucket %s.", TEST_OBJECT, TEST_BUCKET) assert return_value
TestGoogleCloudStorageObjectSensor
python
kamyu104__LeetCode-Solutions
Python/minimize-hamming-distance-after-swap-operations.py
{ "start": 2083, "end": 2868 }
class ____(object): def minimumHammingDistance(self, source, target, allowedSwaps): """ :type source: List[int] :type target: List[int] :type allowedSwaps: List[List[int]] :rtype: int """ uf = UnionFind(len(source)) for x, y in allowedSwaps: uf.union_set(x, y) groups = collections.defaultdict(set) for i in xrange(len(source)): groups[uf.find_set(i)].add(i) result = 0 for idxs in groups.itervalues(): source_cnt = collections.Counter([source[i] for i in idxs]) target_cnt = collections.Counter([target[i] for i in idxs]) diff = source_cnt-target_cnt result += sum(diff.itervalues()) return result
Solution2
python
bokeh__bokeh
tests/unit/bokeh/core/test_properties.py
{ "start": 15667, "end": 18790 }
class ____(HasProps): pass def test_HasProps_equals() -> None: class Foo(HasProps): x = Int(12) y = String("hello") z = List(Int, default=[1,2,3]) class FooUnrelated(HasProps): x = Int(12) y = String("hello") z = List(Int, default=[1,2,3]) v = Foo().equals(Foo()) assert v is True v = Foo(x=1).equals(Foo(x=1)) assert v is True v = Foo(x=1).equals(Foo(x=2)) assert v is False v = Foo(x=1).equals(1) assert v is False v = Foo().equals(FooUnrelated()) assert v is False def test_HasProps_clone() -> None: class CloneModel(HasProps): p0 = Int() p1 = Alias("p0") p2 = List(Int) p3 = Readonly(Nullable(Instance(lambda: CloneModel)), default=None) obj0 = CloneModel(p0=10, p2=[1, 2, 3], p3=CloneModel(p0=20, p2=[4, 5, 6])) props0 = obj0.properties_with_values(include_defaults=False) obj1 = obj0.clone() props1 = obj1.properties_with_values(include_defaults=False) assert props0 == props1 assert obj1.p0 == obj0.p0 assert obj1.p1 == obj0.p1 assert obj1.p2 == obj0.p2 assert obj1.p3 == obj0.p3 obj1.p0 = 20 assert obj1.p0 == obj1.p1 == 20 assert obj0.p0 == obj0.p1 == 10 obj1.p2.append(4) assert obj1.p2 == [1, 2, 3, 4] assert obj0.p2 == [1, 2, 3, 4] obj1.p2 = [10, 20] assert obj1.p2 == [10, 20] assert obj0.p2 == [1, 2, 3, 4] obj1.p3.p0 = 30 assert obj1.p3.p0 == 30 assert obj0.p3.p0 == 30 with pytest.raises(RuntimeError, match=r"CloneModel.p3 is a readonly property"): obj0.p3 = None with pytest.raises(RuntimeError, match=r"CloneModel.p3 is a readonly property"): obj1.p3 = None def test_Model_clone() -> None: class Foo(Model, Local): p0 = Int() p1 = Alias("p0") p2 = List(Int) p3 = Nullable(Instance(lambda: Foo), default=None) f1 = Foo(p0=10, p2=[1, 2, 3], p3=Foo(p0=20, p2=[4, 5, 6])) c1 = f1.properties_with_values(include_defaults=False) f2 = f1.clone() c2 = f2.properties_with_values(include_defaults=False) assert f1.id != f2.id assert c1 == c2 assert f1.p3.id == f2.p3.id f2.p2.append(4) assert f1.p2 == [1, 2, 3, 4] f2.p3.p0 = 30 assert f1.p3.p0 == 30 def test_Alias() -> None: class 
Foo(HasProps): x = Int(12) ax = Alias('x') f = Foo(x=10) assert f.x == 10 assert f.ax == 10 f.x = 20 assert f.x == 20 assert f.ax == 20 f.ax = 30 assert f.x == 30 assert f.ax == 30 #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- Test___all__ = verify_all(bcp, ALL)
Baz
python
kamyu104__LeetCode-Solutions
Python/my-calendar-i.py
{ "start": 663, "end": 1048 }
class ____(object): def __init__(self): self.__root = None def book(self, start, end): """ :type start: int :type end: int :rtype: bool """ if self.__root is None: self.__root = Node(start, end) return True return self.root.insert(Node(start, end)) # Time: O(n^2) # Space: O(n)
MyCalendar
python
sqlalchemy__sqlalchemy
test/engine/test_execute.py
{ "start": 30371, "end": 35753 }
class ____(fixtures.TestBase): __sparse_driver_backend__ = True def test_cache(self, connection, metadata): users = Table( "users", metadata, Column( "user_id", INT, primary_key=True, test_needs_autoincrement=True ), Column("user_name", VARCHAR(20)), Column("extra_data", VARCHAR(20)), ) users.create(connection) conn = connection cache = {} cached_conn = conn.execution_options(compiled_cache=cache) ins = users.insert() with patch.object( ins, "_compiler", Mock(side_effect=ins._compiler) ) as compile_mock: cached_conn.execute(ins, {"user_name": "u1"}) cached_conn.execute(ins, {"user_name": "u2"}) cached_conn.execute(ins, {"user_name": "u3"}) eq_(compile_mock.call_count, 1) assert len(cache) == 1 eq_(conn.exec_driver_sql("select count(*) from users").scalar(), 3) @testing.only_on( ["sqlite", "mysql", "postgresql"], "uses blob value that is problematic for some DBAPIs", ) def test_cache_noleak_on_statement_values(self, metadata, connection): # This is a non regression test for an object reference leak caused # by the compiled_cache. photo = Table( "photo", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("photo_blob", LargeBinary()), ) metadata.create_all(connection) cache = {} cached_conn = connection.execution_options(compiled_cache=cache) class PhotoBlob(bytearray): pass blob = PhotoBlob(100) ref_blob = weakref.ref(blob) ins = photo.insert() with patch.object( ins, "_compiler", Mock(side_effect=ins._compiler) ) as compile_mock: cached_conn.execute(ins, {"photo_blob": blob}) eq_(compile_mock.call_count, 1) eq_(len(cache), 1) eq_( connection.exec_driver_sql("select count(*) from photo").scalar(), 1, ) del blob gc_collect() # The compiled statement cache should not hold any reference to the # the statement values (only the keys). 
eq_(ref_blob(), None) def test_keys_independent_of_ordering(self, connection, metadata): users = Table( "users", metadata, Column( "user_id", INT, primary_key=True, test_needs_autoincrement=True ), Column("user_name", VARCHAR(20)), Column("extra_data", VARCHAR(20)), ) users.create(connection) connection.execute( users.insert(), {"user_id": 1, "user_name": "u1", "extra_data": "e1"}, ) cache = {} cached_conn = connection.execution_options(compiled_cache=cache) upd = users.update().where(users.c.user_id == bindparam("b_user_id")) with patch.object( upd, "_compiler", Mock(side_effect=upd._compiler) ) as compile_mock: cached_conn.execute( upd, util.OrderedDict( [ ("b_user_id", 1), ("user_name", "u2"), ("extra_data", "e2"), ] ), ) cached_conn.execute( upd, util.OrderedDict( [ ("b_user_id", 1), ("extra_data", "e3"), ("user_name", "u3"), ] ), ) cached_conn.execute( upd, util.OrderedDict( [ ("extra_data", "e4"), ("user_name", "u4"), ("b_user_id", 1), ] ), ) eq_(compile_mock.call_count, 1) eq_(len(cache), 1) @testing.requires.schemas def test_schema_translate_in_key(self, metadata, connection): Table("x", metadata, Column("q", Integer)) Table("x", metadata, Column("q", Integer), schema=config.test_schema) metadata.create_all(connection) m = MetaData() t1 = Table("x", m, Column("q", Integer)) ins = t1.insert() stmt = select(t1.c.q) cache = {} conn = connection.execution_options(compiled_cache=cache) conn.execute(ins, {"q": 1}) eq_(conn.scalar(stmt), 1) conn = connection.execution_options( compiled_cache=cache, schema_translate_map={None: config.test_schema}, ) conn.execute(ins, {"q": 2}) eq_(conn.scalar(stmt), 2) conn = connection.execution_options( compiled_cache=cache, schema_translate_map={None: None}, ) # should use default schema again even though statement # was compiled with test_schema in the map eq_(conn.scalar(stmt), 1) conn = connection.execution_options( compiled_cache=cache, ) eq_(conn.scalar(stmt), 1)
CompiledCacheTest
python
PyCQA__pylint
tests/functional/g/generic_class_syntax.py
{ "start": 616, "end": 702 }
class ____(Generic[_T]): def __init__(self): self.update_interval = 0
Parent
python
kamyu104__LeetCode-Solutions
Python/count-the-number-of-square-free-subsets.py
{ "start": 1723, "end": 3442 }
class ____(object): def squareFreeSubsets(self, nums): """ :type nums: List[int] :rtype: int """ def linear_sieve_of_eratosthenes(n): # Time: O(n), Space: O(n) primes = [] spf = [-1]*(n+1) # the smallest prime factor for i in xrange(2, n+1): if spf[i] == -1: spf[i] = i primes.append(i) for p in primes: if i*p > n or p > spf[i]: break spf[i*p] = p return primes MAX_NUM = max(nums) PRIMES = linear_sieve_of_eratosthenes(MAX_NUM) MASKS = [0]*(MAX_NUM+1) for x in xrange(MAX_NUM+1): y = x for i, p in enumerate(PRIMES): if y%p: continue if y%p**2 == 0: MASKS[x] = 0 break MASKS[x] |= (1<<i) y //= p MOD = 10**9+7 cnt = collections.Counter(nums) arr = [x for x in cnt.iterkeys() if x != 1] dp = [[-1]*(1<<len(PRIMES)) for i in xrange(len(arr))] def memoization(i, mask): if i == len(arr): return 1 if dp[i][mask] == -1: dp[i][mask] = memoization(i+1, mask) if MASKS[arr[i]] and (MASKS[arr[i]]&mask) == MASKS[arr[i]]: dp[i][mask] = (dp[i][mask]+cnt[arr[i]]*memoization(i+1, mask^MASKS[arr[i]]))%MOD return dp[i][mask] return (memoization(0, (1<<len(PRIMES))-1)*pow(2, cnt[1], MOD)-1)%MOD if 1 in cnt else (memoization(0, (1<<len(PRIMES))-1)-1)%MOD
Solution2
python
scikit-learn__scikit-learn
sklearn/gaussian_process/kernels.py
{ "start": 33369, "end": 39522 }
class ____(Kernel): """The Exponentiation kernel takes one base kernel and a scalar parameter :math:`p` and combines them via .. math:: k_{exp}(X, Y) = k(X, Y) ^p Note that the `__pow__` magic method is overridden, so `Exponentiation(RBF(), 2)` is equivalent to using the ** operator with `RBF() ** 2`. Read more in the :ref:`User Guide <gp_kernels>`. .. versionadded:: 0.18 Parameters ---------- kernel : Kernel The base kernel exponent : float The exponent for the base kernel Examples -------- >>> from sklearn.datasets import make_friedman2 >>> from sklearn.gaussian_process import GaussianProcessRegressor >>> from sklearn.gaussian_process.kernels import (RationalQuadratic, ... Exponentiation) >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) >>> kernel = Exponentiation(RationalQuadratic(), exponent=2) >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5, ... random_state=0).fit(X, y) >>> gpr.score(X, y) 0.419 >>> gpr.predict(X[:1,:], return_std=True) (array([635.5]), array([0.559])) """ def __init__(self, kernel, exponent): self.kernel = kernel self.exponent = exponent def get_params(self, deep=True): """Get parameters of this kernel. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : dict Parameter names mapped to their values. """ params = dict(kernel=self.kernel, exponent=self.exponent) if deep: deep_items = self.kernel.get_params().items() params.update(("kernel__" + k, val) for k, val in deep_items) return params @property def hyperparameters(self): """Returns a list of all hyperparameter.""" r = [] for hyperparameter in self.kernel.hyperparameters: r.append( Hyperparameter( "kernel__" + hyperparameter.name, hyperparameter.value_type, hyperparameter.bounds, hyperparameter.n_elements, ) ) return r @property def theta(self): """Returns the (flattened, log-transformed) non-fixed hyperparameters. 
Note that theta are typically the log-transformed values of the kernel's hyperparameters as this representation of the search space is more amenable for hyperparameter search, as hyperparameters like length-scales naturally live on a log-scale. Returns ------- theta : ndarray of shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel """ return self.kernel.theta @theta.setter def theta(self, theta): """Sets the (flattened, log-transformed) non-fixed hyperparameters. Parameters ---------- theta : ndarray of shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel """ self.kernel.theta = theta @property def bounds(self): """Returns the log-transformed bounds on the theta. Returns ------- bounds : ndarray of shape (n_dims, 2) The log-transformed bounds on the kernel's hyperparameters theta """ return self.kernel.bounds def __eq__(self, b): if type(self) != type(b): return False return self.kernel == b.kernel and self.exponent == b.exponent def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Left argument of the returned kernel k(X, Y) Y : array-like of shape (n_samples_Y, n_features) or list of object,\ default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when `eval_gradient` is True. 
""" if eval_gradient: K, K_gradient = self.kernel(X, Y, eval_gradient=True) K_gradient *= self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1) return K**self.exponent, K_gradient else: K = self.kernel(X, Y, eval_gradient=False) return K**self.exponent def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Argument to the kernel. Returns ------- K_diag : ndarray of shape (n_samples_X,) Diagonal of kernel k(X, X) """ return self.kernel.diag(X) ** self.exponent def __repr__(self): return "{0} ** {1}".format(self.kernel, self.exponent) def is_stationary(self): """Returns whether the kernel is stationary.""" return self.kernel.is_stationary() @property def requires_vector_input(self): """Returns whether the kernel is defined on discrete structures.""" return self.kernel.requires_vector_input
Exponentiation
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/triggers/dataflow.py
{ "start": 35161, "end": 40426 }
class ____(BaseTrigger): """ Trigger that monitors if a Dataflow job has reached any of successful terminal state meant for that job. :param job_id: Required. ID of the job. :param project_id: Required. The Google Cloud project ID in which the job was started. :param location: Optional. The location where the job is executed. If set to None then the value of DEFAULT_DATAFLOW_LOCATION will be used. :param wait_until_finished: Optional. Dataflow option to block pipeline until completion. :param gcp_conn_id: The connection ID to use for connecting to Google Cloud. :param poll_sleep: Time (seconds) to wait between two consecutive calls to check the job. :param impersonation_chain: Optional. Service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). 
""" def __init__( self, job_id: str, project_id: str | None, location: str = DEFAULT_DATAFLOW_LOCATION, wait_until_finished: bool | None = None, gcp_conn_id: str = "google_cloud_default", poll_sleep: int = 10, impersonation_chain: str | Sequence[str] | None = None, ): super().__init__() self.job_id = job_id self.project_id = project_id self.location = location self.wait_until_finished = wait_until_finished self.gcp_conn_id = gcp_conn_id self.poll_sleep = poll_sleep self.impersonation_chain = impersonation_chain def serialize(self) -> tuple[str, dict[str, Any]]: """Serialize class arguments and classpath.""" return ( "airflow.providers.google.cloud.triggers.dataflow.DataflowJobStateCompleteTrigger", { "job_id": self.job_id, "project_id": self.project_id, "location": self.location, "wait_until_finished": self.wait_until_finished, "gcp_conn_id": self.gcp_conn_id, "poll_sleep": self.poll_sleep, "impersonation_chain": self.impersonation_chain, }, ) async def run(self): """ Loop until the job reaches successful final or error state. Yields a TriggerEvent with success status, if the job reaches successful state for own type. Yields a TriggerEvent with error status, if the client returns an unexpected terminal job status or any exception is raised while looping. In any other case the Trigger will wait for a specified amount of time stored in self.poll_sleep variable. 
""" try: while True: job = await self.async_hook.get_job( project_id=self.project_id, job_id=self.job_id, location=self.location, ) job_state = job.current_state.name job_type_name = job.type_.name FAILED_STATES = DataflowJobStatus.FAILED_END_STATES | {DataflowJobStatus.JOB_STATE_DRAINED} if job_state in FAILED_STATES: yield TriggerEvent( { "status": "error", "message": ( f"Job with id '{self.job_id}' is in failed terminal state: {job_state}" ), } ) return if self.async_hook.job_reached_terminal_state( job={"id": self.job_id, "currentState": job_state, "type": job_type_name}, wait_until_finished=self.wait_until_finished, ): yield TriggerEvent( { "status": "success", "message": ( f"Job with id '{self.job_id}' has reached successful final state: {job_state}" ), } ) return self.log.info("Sleeping for %s seconds.", self.poll_sleep) await asyncio.sleep(self.poll_sleep) except Exception as e: self.log.error("Exception occurred while checking for job state!") yield TriggerEvent( { "status": "error", "message": str(e), } ) @cached_property def async_hook(self) -> AsyncDataflowHook: return AsyncDataflowHook( gcp_conn_id=self.gcp_conn_id, poll_sleep=self.poll_sleep, impersonation_chain=self.impersonation_chain, )
DataflowJobStateCompleteTrigger
python
django-mptt__django-mptt
mptt/models.py
{ "start": 16237, "end": 45669 }
class ____(models.Model, metaclass=MPTTModelBase): """ Base class for tree models. """ class Meta: abstract = True objects = TreeManager() def __init__(self, *args, **kwargs): if hasattr(self, "_check_no_testing_generators"): self._check_no_testing_generators() super().__init__(*args, **kwargs) self._mptt_meta.update_mptt_cached_fields(self) def _mpttfield(self, fieldname): translated_fieldname = getattr(self._mptt_meta, fieldname + "_attr") return getattr(self, translated_fieldname) @_classproperty def _mptt_updates_enabled(cls): if not cls._mptt_tracking_base: return True return getattr( cls._mptt_tracking_base._threadlocal, "mptt_updates_enabled", True ) # ideally this'd be part of the _mptt_updates_enabled classproperty, but it seems # that settable classproperties are very, very hard to do! suggestions please :) @classmethod def _set_mptt_updates_enabled(cls, value): assert cls is cls._mptt_tracking_base, ( "Can't enable or disable mptt updates on a non-tracking class." ) cls._threadlocal.mptt_updates_enabled = value @_classproperty def _mptt_is_tracking(cls): if not cls._mptt_tracking_base: return False if not hasattr(cls._threadlocal, "mptt_delayed_tree_changes"): # happens the first time this is called from each thread cls._threadlocal.mptt_delayed_tree_changes = None return cls._threadlocal.mptt_delayed_tree_changes is not None @classmethod def _mptt_start_tracking(cls): assert cls is cls._mptt_tracking_base, ( "Can't start or stop mptt tracking on a non-tracking class." ) assert not cls._mptt_is_tracking, "mptt tracking is already started." cls._threadlocal.mptt_delayed_tree_changes = set() @classmethod def _mptt_stop_tracking(cls): assert cls is cls._mptt_tracking_base, ( "Can't start or stop mptt tracking on a non-tracking class." ) assert cls._mptt_is_tracking, "mptt tracking isn't started." 
results = cls._threadlocal.mptt_delayed_tree_changes cls._threadlocal.mptt_delayed_tree_changes = None return results @classmethod def _mptt_track_tree_modified(cls, tree_id): if not cls._mptt_is_tracking: return cls._threadlocal.mptt_delayed_tree_changes.add(tree_id) @classmethod def _mptt_track_tree_insertions(cls, tree_id, num_inserted): if not cls._mptt_is_tracking: return changes = cls._threadlocal.mptt_delayed_tree_changes if not num_inserted or not changes: return if num_inserted < 0: deleted = range(tree_id + num_inserted, -num_inserted) changes.difference_update(deleted) new_changes = {(t + num_inserted if t >= tree_id else t) for t in changes} cls._threadlocal.mptt_delayed_tree_changes = new_changes @raise_if_unsaved def get_ancestors(self, ascending=False, include_self=False): """ Creates a ``QuerySet`` containing the ancestors of this model instance. This defaults to being in descending order (root ancestor first, immediate parent last); passing ``True`` for the ``ascending`` argument will reverse the ordering (immediate parent first, root ancestor last). If ``include_self`` is ``True``, the ``QuerySet`` will also include this model instance. """ opts = self._mptt_meta if self.is_root_node(): if not include_self: return self._tree_manager.none() else: # Filter on pk for efficiency. qs = self._tree_manager.filter(pk=self.pk) else: order_by = opts.left_attr if ascending: order_by = "-" + order_by left = getattr(self, opts.left_attr) right = getattr(self, opts.right_attr) if not include_self: left -= 1 right += 1 qs = self._tree_manager._mptt_filter( left__lte=left, right__gte=right, tree_id=self._mpttfield("tree_id"), ) qs = qs.order_by(order_by) if hasattr(self, "_mptt_use_cached_ancestors"): # Called during or after a `recursetree` tag. # There should be cached parents up to level 0. # So we can use them to avoid doing a query at all. 
ancestors = [] p = self if not include_self: p = getattr(p, opts.parent_attr) while p is not None: ancestors.append(p) p = getattr(p, opts.parent_attr) ancestors.reverse() qs._result_cache = ancestors return qs @raise_if_unsaved def get_family(self): """ Returns a ``QuerySet`` containing the ancestors, the model itself and the descendants, in tree order. """ opts = self._mptt_meta left = getattr(self, opts.left_attr) right = getattr(self, opts.right_attr) ancestors = Q( **{ "%s__lte" % opts.left_attr: left, "%s__gte" % opts.right_attr: right, opts.tree_id_attr: self._mpttfield("tree_id"), } ) descendants = Q( **{ "%s__gte" % opts.left_attr: left, "%s__lte" % opts.left_attr: right, opts.tree_id_attr: self._mpttfield("tree_id"), } ) return self._tree_manager.filter(ancestors | descendants) @raise_if_unsaved def get_children(self): """ Returns a ``QuerySet`` containing the immediate children of this model instance, in tree order. The benefit of using this method over the reverse relation provided by the ORM to the instance's children is that a database query can be avoided in the case where the instance is a leaf node (it has no children). If called from a template where the tree has been walked by the ``cache_tree_children`` filter, no database query is required. """ if hasattr(self, "_cached_children"): qs = self._tree_manager.filter(pk__in=[n.pk for n in self._cached_children]) qs._result_cache = self._cached_children return qs else: if self.is_leaf_node(): return self._tree_manager.none() return self._tree_manager._mptt_filter(parent=self) @raise_if_unsaved def get_descendants(self, include_self=False): """ Creates a ``QuerySet`` containing descendants of this model instance, in tree order. If ``include_self`` is ``True``, the ``QuerySet`` will also include this model instance. 
""" if self.is_leaf_node(): if not include_self: return self._tree_manager.none() else: return self._tree_manager.filter(pk=self.pk) opts = self._mptt_meta left = getattr(self, opts.left_attr) right = getattr(self, opts.right_attr) if not include_self: left += 1 right -= 1 return self._tree_manager._mptt_filter( tree_id=self._mpttfield("tree_id"), left__gte=left, left__lte=right ) def get_descendant_count(self): """ Returns the number of descendants this model instance has. """ if self._mpttfield("right") is None: # node not saved yet return 0 else: return (self._mpttfield("right") - self._mpttfield("left") - 1) // 2 @raise_if_unsaved def get_leafnodes(self, include_self=False): """ Creates a ``QuerySet`` containing leafnodes of this model instance, in tree order. If ``include_self`` is ``True``, the ``QuerySet`` will also include this model instance (if it is a leaf node) """ descendants = self.get_descendants(include_self=include_self) return self._tree_manager._mptt_filter( descendants, left=(models.F(self._mptt_meta.right_attr) - 1) ) @raise_if_unsaved def get_next_sibling(self, *filter_args, **filter_kwargs): """ Returns this model instance's next sibling in the tree, or ``None`` if it doesn't have a next sibling. """ qs = self._tree_manager.filter(*filter_args, **filter_kwargs) if self.is_root_node(): qs = self._tree_manager._mptt_filter( qs, parent=None, tree_id__gt=self._mpttfield("tree_id"), ) else: qs = self._tree_manager._mptt_filter( qs, parent__pk=getattr(self, self._mptt_meta.parent_attr + "_id"), left__gt=self._mpttfield("right"), ) siblings = qs[:1] return (siblings and siblings[0]) or None @raise_if_unsaved def get_previous_sibling(self, *filter_args, **filter_kwargs): """ Returns this model instance's previous sibling in the tree, or ``None`` if it doesn't have a previous sibling. 
""" opts = self._mptt_meta qs = self._tree_manager.filter(*filter_args, **filter_kwargs) if self.is_root_node(): qs = self._tree_manager._mptt_filter( qs, parent=None, tree_id__lt=self._mpttfield("tree_id"), ) qs = qs.order_by("-" + opts.tree_id_attr) else: qs = self._tree_manager._mptt_filter( qs, parent__pk=getattr(self, opts.parent_attr + "_id"), right__lt=self._mpttfield("left"), ) qs = qs.order_by("-" + opts.right_attr) siblings = qs[:1] return (siblings and siblings[0]) or None @raise_if_unsaved def get_root(self): """ Returns the root node of this model instance's tree. """ if self.is_root_node() and type(self) == self._tree_manager.tree_model: return self return self._tree_manager._mptt_filter( tree_id=self._mpttfield("tree_id"), parent=None, ).get() @raise_if_unsaved def get_siblings(self, include_self=False): """ Creates a ``QuerySet`` containing siblings of this model instance. Root nodes are considered to be siblings of other root nodes. If ``include_self`` is ``True``, the ``QuerySet`` will also include this model instance. """ if self.is_root_node(): queryset = self._tree_manager._mptt_filter(parent=None) else: parent_id = getattr(self, self._mptt_meta.parent_attr + "_id") queryset = self._tree_manager._mptt_filter(parent__pk=parent_id) if not include_self: queryset = queryset.exclude(pk=self.pk) return queryset def get_level(self): """ Returns the level of this node (distance from root) """ return getattr(self, self._mptt_meta.level_attr) def insert_at( self, target, position="first-child", save=False, allow_existing_pk=False, refresh_target=True, ): """ Convenience method for calling ``TreeManager.insert_node`` with this model instance. """ self._tree_manager.insert_node( self, target, position, save, allow_existing_pk=allow_existing_pk, refresh_target=refresh_target, ) def is_child_node(self): """ Returns ``True`` if this model instance is a child node, ``False`` otherwise. 
""" return not self.is_root_node() def is_leaf_node(self): """ Returns ``True`` if this model instance is a leaf node (it has no children), ``False`` otherwise. """ return not self.get_descendant_count() def is_root_node(self): """ Returns ``True`` if this model instance is a root node, ``False`` otherwise. """ return getattr(self, self._mptt_meta.parent_attr + "_id") is None @raise_if_unsaved def is_descendant_of(self, other, include_self=False): """ Returns ``True`` if this model is a descendant of the given node, ``False`` otherwise. If include_self is True, also returns True if the two nodes are the same node. """ opts = self._mptt_meta if include_self and other.pk == self.pk: return True if getattr(self, opts.tree_id_attr) != getattr(other, opts.tree_id_attr): return False else: left = getattr(self, opts.left_attr) right = getattr(self, opts.right_attr) return left > getattr(other, opts.left_attr) and right < getattr( other, opts.right_attr ) @raise_if_unsaved def is_ancestor_of(self, other, include_self=False): """ Returns ``True`` if this model is an ancestor of the given node, ``False`` otherwise. If include_self is True, also returns True if the two nodes are the same node. """ if include_self and other.pk == self.pk: return True return other.is_descendant_of(self) def move_to(self, target, position="first-child"): """ Convenience method for calling ``TreeManager.move_node`` with this model instance. NOTE: This is a low-level method; it does NOT respect ``MPTTMeta.order_insertion_by``. In most cases you should just move the node yourself by setting node.parent. 
""" self._tree_manager.move_node(self, target, position) def _is_saved(self, using=None): if self.pk is None or self._mpttfield("tree_id") is None: return False opts = self._meta if opts.pk.remote_field is None: return True else: if not hasattr(self, "_mptt_saved"): manager = self.__class__._base_manager manager = manager.using(using) self._mptt_saved = manager.filter(pk=self.pk).exists() return self._mptt_saved def _get_user_field_names(self): """Returns the list of user defined (i.e. non-mptt internal) field names.""" from django.db.models.fields import AutoField field_names = [] internal_fields = ( self._mptt_meta.left_attr, self._mptt_meta.right_attr, self._mptt_meta.tree_id_attr, self._mptt_meta.level_attr, ) for field in self._meta.concrete_fields: if ( (field.name not in internal_fields) and (not isinstance(field, AutoField)) and (not field.primary_key) ): field_names.append(field.name) return field_names def save(self, *args, **kwargs): """ If this is a new node, sets tree fields up before it is inserted into the database, making room in the tree structure as necessary, defaulting to making the new node the last child of its parent. It the node's left and right edge indicators already been set, we take this as indication that the node has already been set up for insertion, so its tree fields are left untouched. If this is an existing node and its parent has been changed, performs reparenting in the tree structure, defaulting to making the node the last child of its new parent. In either case, if the node's class has its ``order_insertion_by`` tree option set, the node will be inserted or moved to the appropriate position to maintain ordering by the specified field. """ do_updates = self.__class__._mptt_updates_enabled track_updates = self.__class__._mptt_is_tracking opts = self._mptt_meta if not (do_updates or track_updates): # inside manager.disable_mptt_updates(), don't do any updates. 
# unless we're also inside TreeManager.delay_mptt_updates() if self._mpttfield("left") is None: # we need to set *some* values, though don't care too much what. parent = cached_field_value(self, opts.parent_attr) # if we have a cached parent, have a stab at getting # possibly-correct values. otherwise, meh. if parent: left = parent._mpttfield("left") + 1 setattr(self, opts.left_attr, left) setattr(self, opts.right_attr, left + 1) setattr(self, opts.level_attr, parent._mpttfield("level") + 1) setattr(self, opts.tree_id_attr, parent._mpttfield("tree_id")) self._tree_manager._post_insert_update_cached_parent_right( parent, 2 ) else: setattr(self, opts.left_attr, 1) setattr(self, opts.right_attr, 2) setattr(self, opts.level_attr, 0) setattr(self, opts.tree_id_attr, 0) return super().save(*args, **kwargs) parent_id = opts.get_raw_field_value(self, opts.parent_attr) # determine whether this instance is already in the db force_update = kwargs.get("force_update", False) force_insert = kwargs.get("force_insert", False) collapse_old_tree = None deferred_fields = self.get_deferred_fields() if force_update or ( not force_insert and self._is_saved(using=kwargs.get("using")) ): # it already exists, so do a move old_parent_id = self._mptt_cached_fields[opts.parent_attr] if old_parent_id is DeferredAttribute: same_order = True else: same_order = old_parent_id == parent_id if same_order and len(self._mptt_cached_fields) > 1: for field_name, old_value in self._mptt_cached_fields.items(): if ( old_value is DeferredAttribute and field_name not in deferred_fields ): same_order = False break if old_value != opts.get_raw_field_value(self, field_name): same_order = False break if not do_updates and not same_order: same_order = True self.__class__._mptt_track_tree_modified(self._mpttfield("tree_id")) elif (not do_updates) and not same_order and old_parent_id is None: # the old tree no longer exists, so we need to collapse it. 
collapse_old_tree = self._mpttfield("tree_id") parent = getattr(self, opts.parent_attr) tree_id = parent._mpttfield("tree_id") left = parent._mpttfield("left") + 1 self.__class__._mptt_track_tree_modified(tree_id) setattr(self, opts.tree_id_attr, tree_id) setattr(self, opts.left_attr, left) setattr(self, opts.right_attr, left + 1) setattr(self, opts.level_attr, parent._mpttfield("level") + 1) same_order = True if not same_order: parent = getattr(self, opts.parent_attr) opts.set_raw_field_value(self, opts.parent_attr, old_parent_id) try: right_sibling = opts.get_ordered_insertion_target(self, parent) if parent_id is not None: # If we aren't already a descendant of the new parent, # we need to update the parent.rght so things like # get_children and get_descendant_count work correctly. # # parent might be None if parent_id was assigned # directly -- then we certainly do not have to update # the cached parent. update_cached_parent = parent and ( getattr(self, opts.tree_id_attr) != getattr(parent, opts.tree_id_attr) or getattr(self, opts.left_attr) < getattr(parent, opts.left_attr) or getattr(self, opts.right_attr) > getattr(parent, opts.right_attr) ) if right_sibling: self._tree_manager._move_node( self, right_sibling, "left", save=False, refresh_target=False, ) else: # Default movement if parent_id is None: root_nodes = self._tree_manager.root_nodes() try: rightmost_sibling = root_nodes.exclude( pk=self.pk ).order_by("-" + opts.tree_id_attr)[0] self._tree_manager._move_node( self, rightmost_sibling, "right", save=False, refresh_target=False, ) except IndexError: pass else: self._tree_manager._move_node( self, parent, "last-child", save=False ) if parent_id is not None and update_cached_parent: # Update rght of cached parent right_shift = 2 * (self.get_descendant_count() + 1) self._tree_manager._post_insert_update_cached_parent_right( parent, right_shift ) finally: # Make sure the new parent is always # restored on the way out in case of errors. 
opts.set_raw_field_value(self, opts.parent_attr, parent_id) # If there were no exceptions raised then send a moved signal node_moved.send( sender=self.__class__, instance=self, target=getattr(self, opts.parent_attr), ) else: opts.set_raw_field_value(self, opts.parent_attr, parent_id) if not track_updates: # When not using delayed/disabled updates, # populate update_fields with user defined model fields. # This helps preserve tree integrity when saving model on top # of a modified tree. if len(args) > 3: if not args[3]: args = list(args) args[3] = self._get_user_field_names() args = tuple(args) else: if not kwargs.get("update_fields"): kwargs["update_fields"] = self._get_user_field_names() else: # new node, do an insert if getattr(self, opts.left_attr) and getattr(self, opts.right_attr): # This node has already been set up for insertion. pass else: parent = getattr(self, opts.parent_attr) right_sibling = None # if we're inside delay_mptt_updates, don't do queries to find # sibling position. instead, do default insertion. correct # positions will be found during partial rebuild later. # *unless* this is a root node. (as update tracking doesn't # handle re-ordering of trees.) 
if (do_updates or parent is None) and opts.order_insertion_by: right_sibling = opts.get_ordered_insertion_target(self, parent) if right_sibling: self.insert_at( right_sibling, "left", allow_existing_pk=True, refresh_target=False, ) if parent: # since we didn't insert into parent, we have to update parent.rght # here instead of in TreeManager.insert_node() right_shift = 2 * (self.get_descendant_count() + 1) self._tree_manager._post_insert_update_cached_parent_right( parent, right_shift ) else: # Default insertion self.insert_at( parent, position="last-child", allow_existing_pk=True ) try: super().save(*args, **kwargs) finally: if collapse_old_tree is not None: self._tree_manager._create_tree_space(collapse_old_tree, -1) self._mptt_saved = True opts.update_mptt_cached_fields(self) save.alters_data = True def delete(self, *args, **kwargs): """Calling ``delete`` on a node will delete it as well as its full subtree, as opposed to reattaching all the subnodes to its parent node. There are no argument specific to a MPTT model, all the arguments will be passed directly to the django's ``Model.delete``. ``delete`` will not return anything.""" try: # We have to make sure we use database's mptt values, since they # could have changed between the moment the instance was retrieved and # the moment it is deleted. # This happens for example if you delete several nodes at once from a queryset. 
fields_to_refresh = [ self._mptt_meta.right_attr, self._mptt_meta.left_attr, self._mptt_meta.tree_id_attr, ] self.refresh_from_db(fields=fields_to_refresh) except self.__class__.DoesNotExist: # In case the object was already deleted, we don't want to throw an exception pass tree_width = self._mpttfield("right") - self._mpttfield("left") + 1 target_right = self._mpttfield("right") tree_id = self._mpttfield("tree_id") self._tree_manager._close_gap(tree_width, target_right, tree_id) parent = cached_field_value(self, self._mptt_meta.parent_attr) if parent: right_shift = -self.get_descendant_count() - 2 self._tree_manager._post_insert_update_cached_parent_right( parent, right_shift ) return super().delete(*args, **kwargs) delete.alters_data = True def _mptt_refresh(self): if not self.pk: return manager = type(self)._tree_manager opts = self._mptt_meta values = ( manager.using(self._state.db) .filter(pk=self.pk) .values( opts.left_attr, opts.right_attr, opts.level_attr, opts.tree_id_attr, )[0] ) for k, v in values.items(): setattr(self, k, v) def _check_no_testing_generators(self): """Check that we are not generationg model from model_bakery""" if sys.argv[1:2] == ["test"]: # in testing environment curframe = inspect.currentframe() call_frame = inspect.getouterframes(curframe, 0) call_file = call_frame[5][1] call_directory = call_file.split("/")[-2] if "model_bakery" in call_file and not getattr( settings, "MPTT_ALLOW_TESTING_GENERATORS", False ): raise Exception( f"The {call_directory} populates django-mptt fields with random values which leads to unpredictable behavior. " "If you really want to generate this model that way, please set MPTT_ALLOW_TESTING_GENERATORS=True in your settings.", ) # Use _check_no_testing_generators function only if model_bakery is in the path. try: import model_bakery # noqa MPTTModel._check_no_testing_generators = _check_no_testing_generators except ImportError: pass
MPTTModel
python
django__django
tests/modeladmin/tests.py
{ "start": 898, "end": 34455 }
class ____(TestCase): @classmethod def setUpTestData(cls): cls.band = Band.objects.create( name="The Doors", bio="", sign_date=date(1965, 1, 1), ) def setUp(self): self.site = AdminSite() def test_modeladmin_str(self): ma = ModelAdmin(Band, self.site) self.assertEqual(str(ma), "modeladmin.ModelAdmin") def test_default_attributes(self): ma = ModelAdmin(Band, self.site) self.assertEqual(ma.actions, ()) self.assertEqual(ma.inlines, ()) # form/fields/fieldsets interaction ############################## def test_default_fields(self): ma = ModelAdmin(Band, self.site) self.assertEqual( list(ma.get_form(request).base_fields), ["name", "bio", "sign_date"] ) self.assertEqual(list(ma.get_fields(request)), ["name", "bio", "sign_date"]) self.assertEqual( list(ma.get_fields(request, self.band)), ["name", "bio", "sign_date"] ) self.assertIsNone(ma.get_exclude(request, self.band)) def test_default_fieldsets(self): # fieldsets_add and fieldsets_change should return a special data # structure that is used in the templates. They should generate the # "right thing" whether we have specified a custom form, the fields # argument, or nothing at all. # # Here's the default case. There are no custom form_add/form_change # methods, no fields argument, and no fieldsets argument. ma = ModelAdmin(Band, self.site) self.assertEqual( ma.get_fieldsets(request), [(None, {"fields": ["name", "bio", "sign_date"]})], ) self.assertEqual( ma.get_fieldsets(request, self.band), [(None, {"fields": ["name", "bio", "sign_date"]})], ) def test_get_fieldsets(self): # get_fieldsets() is called when figuring out form fields (#18681). 
class BandAdmin(ModelAdmin): def get_fieldsets(self, request, obj=None): return [(None, {"fields": ["name", "bio"]})] ma = BandAdmin(Band, self.site) form = ma.get_form(None) self.assertEqual(form._meta.fields, ["name", "bio"]) class InlineBandAdmin(TabularInline): model = Concert fk_name = "main_band" can_delete = False def get_fieldsets(self, request, obj=None): return [(None, {"fields": ["day", "transport"]})] ma = InlineBandAdmin(Band, self.site) form = ma.get_formset(None).form self.assertEqual(form._meta.fields, ["day", "transport"]) def test_lookup_allowed_allows_nonexistent_lookup(self): """ A lookup_allowed allows a parameter whose field lookup doesn't exist. (#21129). """ class BandAdmin(ModelAdmin): fields = ["name"] ma = BandAdmin(Band, self.site) self.assertIs( ma.lookup_allowed("name__nonexistent", "test_value", request), True, ) @isolate_apps("modeladmin") def test_lookup_allowed_onetoone(self): class Department(models.Model): code = models.CharField(max_length=4, unique=True) class Employee(models.Model): department = models.ForeignKey(Department, models.CASCADE, to_field="code") class EmployeeProfile(models.Model): employee = models.OneToOneField(Employee, models.CASCADE) class EmployeeInfo(models.Model): employee = models.OneToOneField(Employee, models.CASCADE) description = models.CharField(max_length=100) class EmployeeProfileAdmin(ModelAdmin): list_filter = [ "employee__employeeinfo__description", "employee__department__code", ] ma = EmployeeProfileAdmin(EmployeeProfile, self.site) # Reverse OneToOneField self.assertIs( ma.lookup_allowed( "employee__employeeinfo__description", "test_value", request ), True, ) # OneToOneField and ForeignKey self.assertIs( ma.lookup_allowed("employee__department__code", "test_value", request), True, ) @isolate_apps("modeladmin") def test_lookup_allowed_for_local_fk_fields(self): class Country(models.Model): pass class Place(models.Model): country = models.ForeignKey(Country, models.CASCADE) class 
PlaceAdmin(ModelAdmin): pass ma = PlaceAdmin(Place, self.site) cases = [ ("country", "1"), ("country__exact", "1"), ("country__id", "1"), ("country__id__exact", "1"), ("country__isnull", True), ("country__isnull", False), ("country__id__isnull", False), ] for lookup, lookup_value in cases: with self.subTest(lookup=lookup): self.assertIs(ma.lookup_allowed(lookup, lookup_value, request), True) @isolate_apps("modeladmin") def test_lookup_allowed_non_autofield_primary_key(self): class Country(models.Model): id = models.CharField(max_length=2, primary_key=True) class Place(models.Model): country = models.ForeignKey(Country, models.CASCADE) class PlaceAdmin(ModelAdmin): list_filter = ["country"] ma = PlaceAdmin(Place, self.site) self.assertIs(ma.lookup_allowed("country__id__exact", "DE", request), True) @isolate_apps("modeladmin") def test_lookup_allowed_foreign_primary(self): class Country(models.Model): name = models.CharField(max_length=256) class Place(models.Model): country = models.ForeignKey(Country, models.CASCADE) class Restaurant(models.Model): place = models.OneToOneField(Place, models.CASCADE, primary_key=True) class Waiter(models.Model): restaurant = models.ForeignKey(Restaurant, models.CASCADE) class WaiterAdmin(ModelAdmin): list_filter = [ "restaurant__place__country", "restaurant__place__country__name", ] ma = WaiterAdmin(Waiter, self.site) self.assertIs( ma.lookup_allowed("restaurant__place__country", "1", request), True, ) self.assertIs( ma.lookup_allowed("restaurant__place__country__id__exact", "1", request), True, ) self.assertIs( ma.lookup_allowed( "restaurant__place__country__name", "test_value", request ), True, ) def test_lookup_allowed_considers_dynamic_list_filter(self): class ConcertAdmin(ModelAdmin): list_filter = ["main_band__sign_date"] def get_list_filter(self, request): if getattr(request, "user", None): return self.list_filter + ["main_band__name"] return self.list_filter model_admin = ConcertAdmin(Concert, self.site) 
request_band_name_filter = RequestFactory().get( "/", {"main_band__name": "test"} ) self.assertIs( model_admin.lookup_allowed( "main_band__sign_date", "?", request_band_name_filter ), True, ) self.assertIs( model_admin.lookup_allowed( "main_band__name", "?", request_band_name_filter ), False, ) request_with_superuser = request self.assertIs( model_admin.lookup_allowed( "main_band__sign_date", "?", request_with_superuser ), True, ) self.assertIs( model_admin.lookup_allowed("main_band__name", "?", request_with_superuser), True, ) def test_field_arguments(self): # If fields is specified, fieldsets_add and fieldsets_change should # just stick the fields into a formsets structure and return it. class BandAdmin(ModelAdmin): fields = ["name"] ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_fields(request)), ["name"]) self.assertEqual(list(ma.get_fields(request, self.band)), ["name"]) self.assertEqual(ma.get_fieldsets(request), [(None, {"fields": ["name"]})]) self.assertEqual( ma.get_fieldsets(request, self.band), [(None, {"fields": ["name"]})] ) def test_field_arguments_restricted_on_form(self): # If fields or fieldsets is specified, it should exclude fields on the # Form class to the fields specified. This may cause errors to be # raised in the db layer if required model fields aren't in fields/ # fieldsets, but that's preferable to ghost errors where a field in the # Form class isn't being displayed because it's not in # fields/fieldsets. # Using `fields`. class BandAdmin(ModelAdmin): fields = ["name"] ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ["name"]) self.assertEqual(list(ma.get_form(request, self.band).base_fields), ["name"]) # Using `fieldsets`. 
class BandAdmin(ModelAdmin): fieldsets = [(None, {"fields": ["name"]})] ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ["name"]) self.assertEqual(list(ma.get_form(request, self.band).base_fields), ["name"]) # Using `exclude`. class BandAdmin(ModelAdmin): exclude = ["bio"] ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ["name", "sign_date"]) # You can also pass a tuple to `exclude`. class BandAdmin(ModelAdmin): exclude = ("bio",) ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ["name", "sign_date"]) # Using `fields` and `exclude`. class BandAdmin(ModelAdmin): fields = ["name", "bio"] exclude = ["bio"] ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ["name"]) def test_custom_form_meta_exclude_with_readonly(self): """ The custom ModelForm's `Meta.exclude` is respected when used in conjunction with `ModelAdmin.readonly_fields` and when no `ModelAdmin.exclude` is defined (#14496). 
""" # With ModelAdmin class AdminBandForm(forms.ModelForm): class Meta: model = Band exclude = ["bio"] class BandAdmin(ModelAdmin): readonly_fields = ["name"] form = AdminBandForm ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ["sign_date"]) # With InlineModelAdmin class AdminConcertForm(forms.ModelForm): class Meta: model = Concert exclude = ["day"] class ConcertInline(TabularInline): readonly_fields = ["transport"] form = AdminConcertForm fk_name = "main_band" model = Concert class BandAdmin(ModelAdmin): inlines = [ConcertInline] ma = BandAdmin(Band, self.site) self.assertEqual( list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields), ["main_band", "opening_band", "id", "DELETE"], ) def test_custom_formfield_override_readonly(self): class AdminBandForm(forms.ModelForm): name = forms.CharField() class Meta: exclude = () model = Band class BandAdmin(ModelAdmin): form = AdminBandForm readonly_fields = ["name"] ma = BandAdmin(Band, self.site) # `name` shouldn't appear in base_fields because it's part of # readonly_fields. self.assertEqual(list(ma.get_form(request).base_fields), ["bio", "sign_date"]) # But it should appear in get_fields()/fieldsets() so it can be # displayed as read-only. self.assertEqual(list(ma.get_fields(request)), ["bio", "sign_date", "name"]) self.assertEqual( list(ma.get_fieldsets(request)), [(None, {"fields": ["bio", "sign_date", "name"]})], ) def test_custom_form_meta_exclude(self): """ The custom ModelForm's `Meta.exclude` is overridden if `ModelAdmin.exclude` or `InlineModelAdmin.exclude` are defined (#14496). 
""" # With ModelAdmin class AdminBandForm(forms.ModelForm): class Meta: model = Band exclude = ["bio"] class BandAdmin(ModelAdmin): exclude = ["name"] form = AdminBandForm ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ["bio", "sign_date"]) # With InlineModelAdmin class AdminConcertForm(forms.ModelForm): class Meta: model = Concert exclude = ["day"] class ConcertInline(TabularInline): exclude = ["transport"] form = AdminConcertForm fk_name = "main_band" model = Concert class BandAdmin(ModelAdmin): inlines = [ConcertInline] ma = BandAdmin(Band, self.site) self.assertEqual( list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields), ["main_band", "opening_band", "day", "id", "DELETE"], ) def test_overriding_get_exclude(self): class BandAdmin(ModelAdmin): def get_exclude(self, request, obj=None): return ["name"] self.assertEqual( list(BandAdmin(Band, self.site).get_form(request).base_fields), ["bio", "sign_date"], ) def test_get_exclude_overrides_exclude(self): class BandAdmin(ModelAdmin): exclude = ["bio"] def get_exclude(self, request, obj=None): return ["name"] self.assertEqual( list(BandAdmin(Band, self.site).get_form(request).base_fields), ["bio", "sign_date"], ) def test_get_exclude_takes_obj(self): class BandAdmin(ModelAdmin): def get_exclude(self, request, obj=None): if obj: return ["sign_date"] return ["name"] self.assertEqual( list(BandAdmin(Band, self.site).get_form(request, self.band).base_fields), ["name", "bio"], ) def test_custom_form_validation(self): # If a form is specified, it should use it allowing custom validation # to work properly. This won't break any of the admin widgets or media. 
class AdminBandForm(forms.ModelForm): delete = forms.BooleanField() class BandAdmin(ModelAdmin): form = AdminBandForm ma = BandAdmin(Band, self.site) self.assertEqual( list(ma.get_form(request).base_fields), ["name", "bio", "sign_date", "delete"], ) self.assertEqual( type(ma.get_form(request).base_fields["sign_date"].widget), AdminDateWidget ) def test_form_exclude_kwarg_override(self): """ The `exclude` kwarg passed to `ModelAdmin.get_form()` overrides all other declarations (#8999). """ class AdminBandForm(forms.ModelForm): class Meta: model = Band exclude = ["name"] class BandAdmin(ModelAdmin): exclude = ["sign_date"] form = AdminBandForm def get_form(self, request, obj=None, **kwargs): kwargs["exclude"] = ["bio"] return super().get_form(request, obj, **kwargs) ma = BandAdmin(Band, self.site) self.assertEqual(list(ma.get_form(request).base_fields), ["name", "sign_date"]) def test_formset_exclude_kwarg_override(self): """ The `exclude` kwarg passed to `InlineModelAdmin.get_formset()` overrides all other declarations (#8999). 
""" class AdminConcertForm(forms.ModelForm): class Meta: model = Concert exclude = ["day"] class ConcertInline(TabularInline): exclude = ["transport"] form = AdminConcertForm fk_name = "main_band" model = Concert def get_formset(self, request, obj=None, **kwargs): kwargs["exclude"] = ["opening_band"] return super().get_formset(request, obj, **kwargs) class BandAdmin(ModelAdmin): inlines = [ConcertInline] ma = BandAdmin(Band, self.site) self.assertEqual( list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields), ["main_band", "day", "transport", "id", "DELETE"], ) def test_formset_overriding_get_exclude_with_form_fields(self): class AdminConcertForm(forms.ModelForm): class Meta: model = Concert fields = ["main_band", "opening_band", "day", "transport"] class ConcertInline(TabularInline): form = AdminConcertForm fk_name = "main_band" model = Concert def get_exclude(self, request, obj=None): return ["opening_band"] class BandAdmin(ModelAdmin): inlines = [ConcertInline] ma = BandAdmin(Band, self.site) self.assertEqual( list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields), ["main_band", "day", "transport", "id", "DELETE"], ) def test_formset_overriding_get_exclude_with_form_exclude(self): class AdminConcertForm(forms.ModelForm): class Meta: model = Concert exclude = ["day"] class ConcertInline(TabularInline): form = AdminConcertForm fk_name = "main_band" model = Concert def get_exclude(self, request, obj=None): return ["opening_band"] class BandAdmin(ModelAdmin): inlines = [ConcertInline] ma = BandAdmin(Band, self.site) self.assertEqual( list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields), ["main_band", "day", "transport", "id", "DELETE"], ) def test_raw_id_fields_widget_override(self): """ The autocomplete_fields, raw_id_fields, and radio_fields widgets may overridden by specifying a widget in get_formset(). 
""" class ConcertInline(TabularInline): model = Concert fk_name = "main_band" raw_id_fields = ("opening_band",) def get_formset(self, request, obj=None, **kwargs): kwargs["widgets"] = {"opening_band": Select} return super().get_formset(request, obj, **kwargs) class BandAdmin(ModelAdmin): inlines = [ConcertInline] ma = BandAdmin(Band, self.site) band_widget = ( list(ma.get_formsets_with_inlines(request))[0][0]() .forms[0] .fields["opening_band"] .widget ) # Without the override this would be ForeignKeyRawIdWidget. self.assertIsInstance(band_widget, Select) def test_queryset_override(self): # If the queryset of a ModelChoiceField in a custom form is overridden, # RelatedFieldWidgetWrapper doesn't mess that up. band2 = Band.objects.create( name="The Beatles", bio="", sign_date=date(1962, 1, 1) ) ma = ModelAdmin(Concert, self.site) form = ma.get_form(request)() self.assertHTMLEqual( str(form["main_band"]), '<div class="related-widget-wrapper" data-model-ref="band">' '<select data-context="available-source" ' 'name="main_band" id="id_main_band" required>' '<option value="" selected>---------</option>' '<option value="%d">The Beatles</option>' '<option value="%d">The Doors</option>' "</select></div>" % (band2.id, self.band.id), ) class AdminConcertForm(forms.ModelForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields["main_band"].queryset = Band.objects.filter( name="The Doors" ) class ConcertAdminWithForm(ModelAdmin): form = AdminConcertForm ma = ConcertAdminWithForm(Concert, self.site) form = ma.get_form(request)() self.assertHTMLEqual( str(form["main_band"]), '<div class="related-widget-wrapper" data-model-ref="band">' '<select data-context="available-source" ' 'name="main_band" id="id_main_band" required>' '<option value="" selected>---------</option>' '<option value="%d">The Doors</option>' "</select></div>" % self.band.id, ) def test_regression_for_ticket_15820(self): """ `obj` is passed from `InlineModelAdmin.get_fieldsets()` to 
`InlineModelAdmin.get_formset()`. """ class CustomConcertForm(forms.ModelForm): class Meta: model = Concert fields = ["day"] class ConcertInline(TabularInline): model = Concert fk_name = "main_band" def get_formset(self, request, obj=None, **kwargs): if obj: kwargs["form"] = CustomConcertForm return super().get_formset(request, obj, **kwargs) class BandAdmin(ModelAdmin): inlines = [ConcertInline] Concert.objects.create(main_band=self.band, opening_band=self.band, day=1) ma = BandAdmin(Band, self.site) inline_instances = ma.get_inline_instances(request) fieldsets = list(inline_instances[0].get_fieldsets(request)) self.assertEqual( fieldsets[0][1]["fields"], ["main_band", "opening_band", "day", "transport"] ) fieldsets = list( inline_instances[0].get_fieldsets(request, inline_instances[0].model) ) self.assertEqual(fieldsets[0][1]["fields"], ["day"]) # radio_fields behavior ########################################### def test_default_foreign_key_widget(self): # First, without any radio_fields specified, the widgets for ForeignKey # and fields with choices specified ought to be a basic Select widget. # ForeignKey widgets in the admin are wrapped with # RelatedFieldWidgetWrapper so they need to be handled properly when # type checking. For Select fields, all of the choices lists have a # first entry of dashes. 
cma = ModelAdmin(Concert, self.site) cmafa = cma.get_form(request) self.assertEqual(type(cmafa.base_fields["main_band"].widget.widget), Select) self.assertEqual( list(cmafa.base_fields["main_band"].widget.choices), [("", "---------"), (self.band.id, "The Doors")], ) self.assertEqual(type(cmafa.base_fields["opening_band"].widget.widget), Select) self.assertEqual( list(cmafa.base_fields["opening_band"].widget.choices), [("", "---------"), (self.band.id, "The Doors")], ) self.assertEqual(type(cmafa.base_fields["day"].widget), Select) self.assertEqual( list(cmafa.base_fields["day"].widget.choices), [("", "---------"), (1, "Fri"), (2, "Sat")], ) self.assertEqual(type(cmafa.base_fields["transport"].widget), Select) self.assertEqual( list(cmafa.base_fields["transport"].widget.choices), [("", "---------"), (1, "Plane"), (2, "Train"), (3, "Bus")], ) def test_foreign_key_as_radio_field(self): # Now specify all the fields as radio_fields. Widgets should now be # RadioSelect, and the choices list should have a first entry of 'None' # if blank=True for the model field. Finally, the widget should have # the 'radiolist' attr, and 'inline' as well if the field is specified # HORIZONTAL. 
class ConcertAdmin(ModelAdmin): radio_fields = { "main_band": HORIZONTAL, "opening_band": VERTICAL, "day": VERTICAL, "transport": HORIZONTAL, } cma = ConcertAdmin(Concert, self.site) cmafa = cma.get_form(request) self.assertEqual( type(cmafa.base_fields["main_band"].widget.widget), AdminRadioSelect ) self.assertEqual( cmafa.base_fields["main_band"].widget.attrs, {"class": "radiolist inline", "data-context": "available-source"}, ) self.assertEqual( list(cmafa.base_fields["main_band"].widget.choices), [(self.band.id, "The Doors")], ) self.assertEqual( type(cmafa.base_fields["opening_band"].widget.widget), AdminRadioSelect ) self.assertEqual( cmafa.base_fields["opening_band"].widget.attrs, {"class": "radiolist", "data-context": "available-source"}, ) self.assertEqual( list(cmafa.base_fields["opening_band"].widget.choices), [("", "None"), (self.band.id, "The Doors")], ) self.assertEqual(type(cmafa.base_fields["day"].widget), AdminRadioSelect) self.assertEqual(cmafa.base_fields["day"].widget.attrs, {"class": "radiolist"}) self.assertEqual( list(cmafa.base_fields["day"].widget.choices), [(1, "Fri"), (2, "Sat")] ) self.assertEqual(type(cmafa.base_fields["transport"].widget), AdminRadioSelect) self.assertEqual( cmafa.base_fields["transport"].widget.attrs, {"class": "radiolist inline"} ) self.assertEqual( list(cmafa.base_fields["transport"].widget.choices), [("", "None"), (1, "Plane"), (2, "Train"), (3, "Bus")], ) class AdminConcertForm(forms.ModelForm): class Meta: model = Concert exclude = ("transport",) class ConcertAdmin(ModelAdmin): form = AdminConcertForm ma = ConcertAdmin(Concert, self.site) self.assertEqual( list(ma.get_form(request).base_fields), ["main_band", "opening_band", "day"] ) class AdminConcertForm(forms.ModelForm): extra = forms.CharField() class Meta: model = Concert fields = ["extra", "transport"] class ConcertAdmin(ModelAdmin): form = AdminConcertForm ma = ConcertAdmin(Concert, self.site) self.assertEqual(list(ma.get_form(request).base_fields), 
["extra", "transport"]) class ConcertInline(TabularInline): form = AdminConcertForm model = Concert fk_name = "main_band" can_delete = True class BandAdmin(ModelAdmin): inlines = [ConcertInline] ma = BandAdmin(Band, self.site) self.assertEqual( list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields), ["extra", "transport", "id", "DELETE", "main_band"], ) def test_log_actions(self): ma = ModelAdmin(Band, self.site) mock_request = MockRequest() mock_request.user = User.objects.create(username="bill") content_type = get_content_type_for_model(self.band) tests = ( (ma.log_addition, ADDITION, {"added": {}}), (ma.log_change, CHANGE, {"changed": {"fields": ["name", "bio"]}}), ) for method, flag, message in tests: with self.subTest(name=method.__name__): created = method(mock_request, self.band, message) fetched = LogEntry.objects.filter(action_flag=flag).latest("id") self.assertEqual(created, fetched) self.assertEqual(fetched.action_flag, flag) self.assertEqual(fetched.content_type, content_type) self.assertEqual(fetched.object_id, str(self.band.pk)) self.assertEqual(fetched.user, mock_request.user) self.assertEqual(fetched.change_message, str(message)) self.assertEqual(fetched.object_repr, str(self.band)) def test_log_deletions(self): ma = ModelAdmin(Band, self.site) mock_request = MockRequest() mock_request.user = User.objects.create(username="akash") content_type = get_content_type_for_model(self.band) Band.objects.create( name="The Beatles", bio="A legendary rock band from Liverpool.", sign_date=date(1962, 1, 1), ) Band.objects.create( name="Mohiner Ghoraguli", bio="A progressive rock band from Calcutta.", sign_date=date(1975, 1, 1), ) queryset = Band.objects.all().order_by("-id")[:3] self.assertEqual(len(queryset), 3) with self.assertNumQueries(1): ma.log_deletions(mock_request, queryset) logs = ( LogEntry.objects.filter(action_flag=DELETION) .order_by("id") .values_list( "user_id", "content_type", "object_id", "object_repr", "action_flag", 
"change_message", ) ) expected_log_values = [ ( mock_request.user.id, content_type.id, str(obj.pk), str(obj), DELETION, "", ) for obj in queryset ] self.assertSequenceEqual(logs, expected_log_values) def test_get_autocomplete_fields(self): class NameAdmin(ModelAdmin): search_fields = ["name"] class SongAdmin(ModelAdmin): autocomplete_fields = ["featuring"] fields = ["featuring", "band"] class OtherSongAdmin(SongAdmin): def get_autocomplete_fields(self, request): return ["band"] self.site.register(Band, NameAdmin) try: # Uses autocomplete_fields if not overridden. model_admin = SongAdmin(Song, self.site) form = model_admin.get_form(request)() self.assertIsInstance( form.fields["featuring"].widget.widget, AutocompleteSelectMultiple ) # Uses overridden get_autocomplete_fields model_admin = OtherSongAdmin(Song, self.site) form = model_admin.get_form(request)() self.assertIsInstance(form.fields["band"].widget.widget, AutocompleteSelect) finally: self.site.unregister(Band) def test_get_deleted_objects(self): mock_request = MockRequest() mock_request.user = User.objects.create_superuser( username="bob", email="bob@test.com", password="test" ) self.site.register(Band, ModelAdmin) ma = self.site.get_model_admin(Band) ( deletable_objects, model_count, perms_needed, protected, ) = ma.get_deleted_objects([self.band], request) self.assertEqual(deletable_objects, ["Band: The Doors"]) self.assertEqual(model_count, {"bands": 1}) self.assertEqual(perms_needed, set()) self.assertEqual(protected, []) def test_get_deleted_objects_with_custom_has_delete_permission(self): """ ModelAdmin.get_deleted_objects() uses ModelAdmin.has_delete_permission() for permissions checking. 
""" mock_request = MockRequest() mock_request.user = User.objects.create_superuser( username="bob", email="bob@test.com", password="test" ) class TestModelAdmin(ModelAdmin): def has_delete_permission(self, request, obj=None): return False self.site.register(Band, TestModelAdmin) ma = self.site.get_model_admin(Band) ( deletable_objects, model_count, perms_needed, protected, ) = ma.get_deleted_objects([self.band], request) self.assertEqual(deletable_objects, ["Band: The Doors"]) self.assertEqual(model_count, {"bands": 1}) self.assertEqual(perms_needed, {"band"}) self.assertEqual(protected, []) def test_modeladmin_repr(self): ma = ModelAdmin(Band, self.site) self.assertEqual( repr(ma), "<ModelAdmin: model=Band site=AdminSite(name='admin')>", )
ModelAdminTests
python
doocs__leetcode
solution/0500-0599/0548.Split Array with Equal Sum/Solution.py
{ "start": 0, "end": 538 }
class ____: def splitArray(self, nums: List[int]) -> bool: n = len(nums) s = [0] * (n + 1) for i, v in enumerate(nums): s[i + 1] = s[i] + v for j in range(3, n - 3): seen = set() for i in range(1, j - 1): if s[i] == s[j] - s[i + 1]: seen.add(s[i]) for k in range(j + 2, n - 1): if s[n] - s[k + 1] == s[k] - s[j + 1] and s[n] - s[k + 1] in seen: return True return False
Solution
python
spack__spack
lib/spack/spack/vendor/ruamel/yaml/tokens.py
{ "start": 9452, "end": 9697 }
class ____(Token): __slots__ = ('value',) id = '<alias>' def __init__(self, value, start_mark, end_mark): # type: (Any, Any, Any) -> None Token.__init__(self, start_mark, end_mark) self.value = value
AliasToken
python
walkccc__LeetCode
solutions/3095. Shortest Subarray With OR at Least K I/3095.py
{ "start": 0, "end": 835 }
class ____: def minimumSubarrayLength(self, nums: list[int], k: int) -> int: ans = len(nums) + 1 ors = 0 count = collections.Counter() l = 0 for r, num in enumerate(nums): ors = self._orNum(ors, num, count) while ors >= k and l <= r: ans = min(ans, r - l + 1) ors = self._undoOrNum(ors, nums[l], count) l += 1 return -1 if ans == len(nums) + 1 else ans def _orNum(self, ors: int, num: int, count: dict[int, int]) -> int: for i in range(30): if num >> i & 1: count[i] += 1 if count[i] == 1: ors += 1 << i return ors def _undoOrNum(self, ors: int, num: int, count: dict[int, int]) -> int: for i in range(30): if num >> i & 1: count[i] -= 1 if count[i] == 0: ors -= 1 << i return ors
Solution
python
huggingface__transformers
src/transformers/models/clip/processing_clip.py
{ "start": 699, "end": 1445 }
class ____(ProcessorMixin): r""" Constructs a CLIP processor which wraps a CLIP image processor and a CLIP tokenizer into a single processor. [`CLIPProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`CLIPTokenizerFast`]. See the [`~CLIPProcessor.__call__`] and [`~CLIPProcessor.decode`] for more information. Args: image_processor ([`CLIPImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`AutoTokenizer`], *optional*): The tokenizer is a required input. """ def __init__(self, image_processor=None, tokenizer=None, **kwargs): super().__init__(image_processor, tokenizer) __all__ = ["CLIPProcessor"]
CLIPProcessor
python
sqlalchemy__sqlalchemy
test/engine/test_execute.py
{ "start": 125831, "end": 134979 }
class ____(fixtures.TablesTest): __backend__ = True __requires__ = ("independent_connections", "insert_returning") @classmethod def define_tables(cls, metadata): Table( "users", metadata, Column("user_id", INT, primary_key=True, autoincrement=False), Column("user_name", VARCHAR(20)), ) @testing.fixture def input_sizes_fixture(self, testing_engine): canary = mock.Mock() def do_set_input_sizes(cursor, list_of_tuples, context): canary.do_set_input_sizes(cursor, list_of_tuples, context) def pre_exec(self): self.translate_set_input_sizes = None self.include_set_input_sizes = None self.exclude_set_input_sizes = None engine = testing_engine() engine.connect().close() # the idea of this test is we fully replace the dialect # do_set_input_sizes with a mock, and we can then intercept # the setting passed to the dialect. the test table uses very # "safe" datatypes so that the DBAPI does not actually need # setinputsizes() called in order to work. with ( mock.patch.object( engine.dialect, "bind_typing", BindTyping.SETINPUTSIZES ), mock.patch.object( engine.dialect, "do_set_input_sizes", do_set_input_sizes ), mock.patch.object( engine.dialect.execution_ctx_cls, "pre_exec", pre_exec ), ): yield engine, canary @testing.requires.insertmanyvalues def test_set_input_sizes_insertmanyvalues_no_event( self, input_sizes_fixture ): engine, canary = input_sizes_fixture with engine.begin() as conn: conn.execute( self.tables.users.insert().returning( self.tables.users.c.user_id ), [ {"user_id": 1, "user_name": "n1"}, {"user_id": 2, "user_name": "n2"}, {"user_id": 3, "user_name": "n3"}, ], ) eq_( canary.mock_calls, [ call.do_set_input_sizes( mock.ANY, [ ( "user_id_0", mock.ANY, testing.eq_type_affinity(Integer), ), ( "user_name_0", mock.ANY, testing.eq_type_affinity(String), ), ( "user_id_1", mock.ANY, testing.eq_type_affinity(Integer), ), ( "user_name_1", mock.ANY, testing.eq_type_affinity(String), ), ( "user_id_2", mock.ANY, testing.eq_type_affinity(Integer), ), ( "user_name_2", mock.ANY, 
testing.eq_type_affinity(String), ), ], mock.ANY, ) ], ) def test_set_input_sizes_no_event(self, input_sizes_fixture): engine, canary = input_sizes_fixture with engine.begin() as conn: conn.execute( self.tables.users.update() .where(self.tables.users.c.user_id == 15) .values(user_id=15, user_name="n1"), ) eq_( canary.mock_calls, [ call.do_set_input_sizes( mock.ANY, [ ( "user_id", mock.ANY, testing.eq_type_affinity(Integer), ), ( "user_name", mock.ANY, testing.eq_type_affinity(String), ), ( "user_id_1", mock.ANY, testing.eq_type_affinity(Integer), ), ], mock.ANY, ) ], ) def test_set_input_sizes_expanding_param(self, input_sizes_fixture): engine, canary = input_sizes_fixture with engine.connect() as conn: conn.execute( select(self.tables.users).where( self.tables.users.c.user_name.in_(["x", "y", "z"]) ) ) eq_( canary.mock_calls, [ call.do_set_input_sizes( mock.ANY, [ ( "user_name_1_1", mock.ANY, testing.eq_type_affinity(String), ), ( "user_name_1_2", mock.ANY, testing.eq_type_affinity(String), ), ( "user_name_1_3", mock.ANY, testing.eq_type_affinity(String), ), ], mock.ANY, ) ], ) @testing.requires.tuple_in def test_set_input_sizes_expanding_tuple_param(self, input_sizes_fixture): engine, canary = input_sizes_fixture from sqlalchemy import tuple_ with engine.connect() as conn: conn.execute( select(self.tables.users).where( tuple_( self.tables.users.c.user_id, self.tables.users.c.user_name, ).in_([(1, "x"), (2, "y")]) ) ) eq_( canary.mock_calls, [ call.do_set_input_sizes( mock.ANY, [ ( "param_1_1_1", mock.ANY, testing.eq_type_affinity(Integer), ), ( "param_1_1_2", mock.ANY, testing.eq_type_affinity(String), ), ( "param_1_2_1", mock.ANY, testing.eq_type_affinity(Integer), ), ( "param_1_2_2", mock.ANY, testing.eq_type_affinity(String), ), ], mock.ANY, ) ], ) def test_set_input_sizes_event(self, input_sizes_fixture): engine, canary = input_sizes_fixture SPECIAL_STRING = mock.Mock() @event.listens_for(engine, "do_setinputsizes") def do_setinputsizes( inputsizes, cursor, 
statement, parameters, context ): for k in inputsizes: if k.type._type_affinity is String: inputsizes[k] = ( SPECIAL_STRING, None, 0, ) with engine.begin() as conn: conn.execute( self.tables.users.update() .where(self.tables.users.c.user_id == 15) .values(user_id=15, user_name="n1"), ) eq_( canary.mock_calls, [ call.do_set_input_sizes( mock.ANY, [ ( "user_id", mock.ANY, testing.eq_type_affinity(Integer), ), ( "user_name", (SPECIAL_STRING, None, 0), testing.eq_type_affinity(String), ), ( "user_id_1", mock.ANY, testing.eq_type_affinity(Integer), ), ], mock.ANY, ) ], )
SetInputSizesTest
python
sympy__sympy
sympy/parsing/sym_expr.py
{ "start": 458, "end": 8895 }
class ____: # type: ignore """Class to store and handle SymPy expressions This class will hold SymPy Expressions and handle the API for the conversion to and from different languages. It works with the C and the Fortran Parser to generate SymPy expressions which are stored here and which can be converted to multiple language's source code. Notes ===== The module and its API are currently under development and experimental and can be changed during development. The Fortran parser does not support numeric assignments, so all the variables have been Initialized to zero. The module also depends on external dependencies: - LFortran which is required to use the Fortran parser - Clang which is required for the C parser Examples ======== Example of parsing C code: >>> from sympy.parsing.sym_expr import SymPyExpression >>> src = ''' ... int a,b; ... float c = 2, d =4; ... ''' >>> a = SymPyExpression(src, 'c') >>> a.return_expr() [Declaration(Variable(a, type=intc)), Declaration(Variable(b, type=intc)), Declaration(Variable(c, type=float32, value=2.0)), Declaration(Variable(d, type=float32, value=4.0))] An example of variable definition: >>> from sympy.parsing.sym_expr import SymPyExpression >>> src2 = ''' ... integer :: a, b, c, d ... real :: p, q, r, s ... ''' >>> p = SymPyExpression() >>> p.convert_to_expr(src2, 'f') >>> p.convert_to_c() ['int a = 0', 'int b = 0', 'int c = 0', 'int d = 0', 'double p = 0.0', 'double q = 0.0', 'double r = 0.0', 'double s = 0.0'] An example of Assignment: >>> from sympy.parsing.sym_expr import SymPyExpression >>> src3 = ''' ... integer :: a, b, c, d, e ... d = a + b - c ... e = b * d + c * e / a ... ''' >>> p = SymPyExpression(src3, 'f') >>> p.convert_to_python() ['a = 0', 'b = 0', 'c = 0', 'd = 0', 'e = 0', 'd = a + b - c', 'e = b*d + c*e/a'] An example of function definition: >>> from sympy.parsing.sym_expr import SymPyExpression >>> src = ''' ... integer function f(a,b) ... integer, intent(in) :: a, b ... integer :: r ... end function ... 
''' >>> a = SymPyExpression(src, 'f') >>> a.convert_to_python() ['def f(a, b):\\n f = 0\\n r = 0\\n return f'] """ def __init__(self, source_code = None, mode = None): """Constructor for SymPyExpression class""" super().__init__() if not(mode or source_code): self._expr = [] elif mode: if source_code: if mode.lower() == 'f': if lfortran: self._expr = src_to_sympy(source_code) else: raise ImportError("LFortran is not installed, cannot parse Fortran code") elif mode.lower() == 'c': if cin: self._expr = parse_c(source_code) else: raise ImportError("Clang is not installed, cannot parse C code") else: raise NotImplementedError( 'Parser for specified language is not implemented' ) else: raise ValueError('Source code not present') else: raise ValueError('Please specify a mode for conversion') def convert_to_expr(self, src_code, mode): """Converts the given source code to SymPy Expressions Attributes ========== src_code : String the source code or filename of the source code that is to be converted mode: String the mode to determine which parser is to be used according to the language of the source code f or F for Fortran c or C for C/C++ Examples ======== >>> from sympy.parsing.sym_expr import SymPyExpression >>> src3 = ''' ... integer function f(a,b) result(r) ... integer, intent(in) :: a, b ... integer :: x ... r = a + b -x ... end function ... 
''' >>> p = SymPyExpression() >>> p.convert_to_expr(src3, 'f') >>> p.return_expr() [FunctionDefinition(integer, name=f, parameters=(Variable(a), Variable(b)), body=CodeBlock( Declaration(Variable(r, type=integer, value=0)), Declaration(Variable(x, type=integer, value=0)), Assignment(Variable(r), a + b - x), Return(Variable(r)) ))] """ if mode.lower() == 'f': if lfortran: self._expr = src_to_sympy(src_code) else: raise ImportError("LFortran is not installed, cannot parse Fortran code") elif mode.lower() == 'c': if cin: self._expr = parse_c(src_code) else: raise ImportError("Clang is not installed, cannot parse C code") else: raise NotImplementedError( "Parser for specified language has not been implemented" ) def convert_to_python(self): """Returns a list with Python code for the SymPy expressions Examples ======== >>> from sympy.parsing.sym_expr import SymPyExpression >>> src2 = ''' ... integer :: a, b, c, d ... real :: p, q, r, s ... c = a/b ... d = c/a ... s = p/q ... r = q/p ... ''' >>> p = SymPyExpression(src2, 'f') >>> p.convert_to_python() ['a = 0', 'b = 0', 'c = 0', 'd = 0', 'p = 0.0', 'q = 0.0', 'r = 0.0', 's = 0.0', 'c = a/b', 'd = c/a', 's = p/q', 'r = q/p'] """ self._pycode = [] for iter in self._expr: self._pycode.append(pycode(iter)) return self._pycode def convert_to_c(self): """Returns a list with the c source code for the SymPy expressions Examples ======== >>> from sympy.parsing.sym_expr import SymPyExpression >>> src2 = ''' ... integer :: a, b, c, d ... real :: p, q, r, s ... c = a/b ... d = c/a ... s = p/q ... r = q/p ... 
''' >>> p = SymPyExpression() >>> p.convert_to_expr(src2, 'f') >>> p.convert_to_c() ['int a = 0', 'int b = 0', 'int c = 0', 'int d = 0', 'double p = 0.0', 'double q = 0.0', 'double r = 0.0', 'double s = 0.0', 'c = a/b;', 'd = c/a;', 's = p/q;', 'r = q/p;'] """ self._ccode = [] for iter in self._expr: self._ccode.append(ccode(iter)) return self._ccode def convert_to_fortran(self): """Returns a list with the fortran source code for the SymPy expressions Examples ======== >>> from sympy.parsing.sym_expr import SymPyExpression >>> src2 = ''' ... integer :: a, b, c, d ... real :: p, q, r, s ... c = a/b ... d = c/a ... s = p/q ... r = q/p ... ''' >>> p = SymPyExpression(src2, 'f') >>> p.convert_to_fortran() [' integer*4 a', ' integer*4 b', ' integer*4 c', ' integer*4 d', ' real*8 p', ' real*8 q', ' real*8 r', ' real*8 s', ' c = a/b', ' d = c/a', ' s = p/q', ' r = q/p'] """ self._fcode = [] for iter in self._expr: self._fcode.append(fcode(iter)) return self._fcode def return_expr(self): """Returns the expression list Examples ======== >>> from sympy.parsing.sym_expr import SymPyExpression >>> src3 = ''' ... integer function f(a,b) ... integer, intent(in) :: a, b ... integer :: r ... r = a+b ... f = r ... end function ... ''' >>> p = SymPyExpression() >>> p.convert_to_expr(src3, 'f') >>> p.return_expr() [FunctionDefinition(integer, name=f, parameters=(Variable(a), Variable(b)), body=CodeBlock( Declaration(Variable(f, type=integer, value=0)), Declaration(Variable(r, type=integer, value=0)), Assignment(Variable(f), Variable(r)), Return(Variable(f)) ))] """ return self._expr
SymPyExpression
python
numba__numba
numba/tests/test_pipeline.py
{ "start": 2760, "end": 5254 }
class ____(TestCase): def _create_pipeline_w_del(self, base=None, inject_after=None): """ Creates a new compiler pipeline with the _InjectDelsPass injected after the pass supplied in kwarg 'inject_after'. """ self.assertTrue(inject_after is not None) self.assertTrue(base is not None) @register_pass(mutates_CFG=False, analysis_only=False) class _InjectDelsPass(base): """ This pass injects ir.Del nodes into the IR """ _name = "inject_dels_%s" % str(base) def __init__(self): base.__init__(self) def run_pass(self, state): pp = postproc.PostProcessor(state.func_ir) pp.run(emit_dels=True) return True class TestCompiler(Compiler): def define_pipelines(self): pm = DefaultPassBuilder.define_nopython_pipeline(self.state) pm.add_pass_after(_InjectDelsPass, inject_after) pm.finalize() return [pm] return TestCompiler def test_compiler_error_on_ir_del_from_functionpass(self): new_compiler = self._create_pipeline_w_del(FunctionPass, InlineInlinables) @njit(pipeline_class=new_compiler) def foo(x): return x + 1 with self.assertRaises(errors.CompilerError) as raises: foo(10) errstr = str(raises.exception) self.assertIn("Illegal IR, del found at:", errstr) self.assertIn("del x", errstr) def test_no_compiler_error_on_ir_del_after_legalization(self): # Legalization should be the last FunctionPass to execute so it's fine # for it to emit ir.Del nodes as no further FunctionPasses will run and # therefore the checking routine in the PassManager won't execute. # This test adds a new pass that is an AnalysisPass into the pipeline # after legalisation, this pass will return with already existing dels # in the IR but by virtue of it being an AnalysisPass the checking # routine won't execute. new_compiler = self._create_pipeline_w_del(AnalysisPass, IRLegalization) @njit(pipeline_class=new_compiler) def foo(x): return x + 1 self.assertTrue(foo(10), foo.py_func(10))
TestPassManagerFunctionality
python
huggingface__transformers
src/transformers/image_utils.py
{ "start": 2658, "end": 20402 }
class ____(ExplicitEnum): PIL = "pillow" TORCH = "torch" NUMPY = "numpy" def get_image_type(image): if is_pil_image(image): return ImageType.PIL if is_torch_tensor(image): return ImageType.TORCH if is_numpy_array(image): return ImageType.NUMPY raise ValueError(f"Unrecognized image type {type(image)}") def is_valid_image(img): return is_pil_image(img) or is_numpy_array(img) or is_torch_tensor(img) def is_valid_list_of_images(images: list): return images and all(is_valid_image(image) for image in images) def concatenate_list(input_list): if isinstance(input_list[0], list): return [item for sublist in input_list for item in sublist] elif isinstance(input_list[0], np.ndarray): return np.concatenate(input_list, axis=0) elif isinstance(input_list[0], torch.Tensor): return torch.cat(input_list, dim=0) def valid_images(imgs): # If we have an list of images, make sure every image is valid if isinstance(imgs, (list, tuple)): for img in imgs: if not valid_images(img): return False # If not a list of tuple, we have been given a single image or batched tensor of images elif not is_valid_image(imgs): return False return True def is_batched(img): if isinstance(img, (list, tuple)): return is_valid_image(img[0]) return False def is_scaled_image(image: np.ndarray) -> bool: """ Checks to see whether the pixel values have already been rescaled to [0, 1]. """ if image.dtype == np.uint8: return False # It's possible the image has pixel values in [0, 255] but is of floating type return np.min(image) >= 0 and np.max(image) <= 1 def make_list_of_images(images, expected_ndims: int = 3) -> list[ImageInput]: """ Ensure that the output is a list of images. If the input is a single image, it is converted to a list of length 1. If the input is a batch of images, it is converted to a list of images. Args: images (`ImageInput`): Image of images to turn into a list of images. expected_ndims (`int`, *optional*, defaults to 3): Expected number of dimensions for a single input image. 
If the input image has a different number of dimensions, an error is raised. """ if is_batched(images): return images # Either the input is a single image, in which case we create a list of length 1 if is_pil_image(images): # PIL images are never batched return [images] if is_valid_image(images): if images.ndim == expected_ndims + 1: # Batch of images images = list(images) elif images.ndim == expected_ndims: # Single image images = [images] else: raise ValueError( f"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got" f" {images.ndim} dimensions." ) return images raise ValueError( f"Invalid image type. Expected either PIL.Image.Image, numpy.ndarray, or torch.Tensor, but got {type(images)}." ) def make_flat_list_of_images( images: Union[list[ImageInput], ImageInput], expected_ndims: int = 3, ) -> ImageInput: """ Ensure that the output is a flat list of images. If the input is a single image, it is converted to a list of length 1. If the input is a nested list of images, it is converted to a flat list of images. Args: images (`Union[list[ImageInput], ImageInput]`): The input image. expected_ndims (`int`, *optional*, defaults to 3): The expected number of dimensions for a single input image. Returns: list: A list of images or a 4d array of images. 
""" # If the input is a nested list of images, we flatten it if ( isinstance(images, (list, tuple)) and all(isinstance(images_i, (list, tuple)) for images_i in images) and all(is_valid_list_of_images(images_i) or not images_i for images_i in images) ): return [img for img_list in images for img in img_list] if isinstance(images, (list, tuple)) and is_valid_list_of_images(images): if is_pil_image(images[0]) or images[0].ndim == expected_ndims: return images if images[0].ndim == expected_ndims + 1: return [img for img_list in images for img in img_list] if is_valid_image(images): if is_pil_image(images) or images.ndim == expected_ndims: return [images] if images.ndim == expected_ndims + 1: return list(images) raise ValueError(f"Could not make a flat list of images from {images}") def make_nested_list_of_images( images: Union[list[ImageInput], ImageInput], expected_ndims: int = 3, ) -> list[ImageInput]: """ Ensure that the output is a nested list of images. Args: images (`Union[list[ImageInput], ImageInput]`): The input image. expected_ndims (`int`, *optional*, defaults to 3): The expected number of dimensions for a single input image. Returns: list: A list of list of images or a list of 4d array of images. 
""" # If it's a list of batches, it's already in the right format if ( isinstance(images, (list, tuple)) and all(isinstance(images_i, (list, tuple)) for images_i in images) and all(is_valid_list_of_images(images_i) or not images_i for images_i in images) ): return images # If it's a list of images, it's a single batch, so convert it to a list of lists if isinstance(images, (list, tuple)) and is_valid_list_of_images(images): if is_pil_image(images[0]) or images[0].ndim == expected_ndims: return [images] if images[0].ndim == expected_ndims + 1: return [list(image) for image in images] # If it's a single image, convert it to a list of lists if is_valid_image(images): if is_pil_image(images) or images.ndim == expected_ndims: return [[images]] if images.ndim == expected_ndims + 1: return [list(images)] raise ValueError("Invalid input type. Must be a single image, a list of images, or a list of batches of images.") def to_numpy_array(img) -> np.ndarray: if not is_valid_image(img): raise ValueError(f"Invalid image type: {type(img)}") if is_vision_available() and isinstance(img, PIL.Image.Image): return np.array(img) return to_numpy(img) def infer_channel_dimension_format( image: np.ndarray, num_channels: Optional[Union[int, tuple[int, ...]]] = None ) -> ChannelDimension: """ Infers the channel dimension format of `image`. Args: image (`np.ndarray`): The image to infer the channel dimension of. num_channels (`int` or `tuple[int, ...]`, *optional*, defaults to `(1, 3)`): The number of channels of the image. Returns: The channel dimension of the image. 
""" num_channels = num_channels if num_channels is not None else (1, 3) num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels if image.ndim == 3: first_dim, last_dim = 0, 2 elif image.ndim == 4: first_dim, last_dim = 1, 3 elif image.ndim == 5: first_dim, last_dim = 2, 4 else: raise ValueError(f"Unsupported number of image dimensions: {image.ndim}") if image.shape[first_dim] in num_channels and image.shape[last_dim] in num_channels: logger.warning( f"The channel dimension is ambiguous. Got image shape {image.shape}. Assuming channels are the first dimension. Use the [input_data_format](https://huggingface.co/docs/transformers/main/internal/image_processing_utils#transformers.image_transforms.rescale.input_data_format) parameter to assign the channel dimension." ) return ChannelDimension.FIRST elif image.shape[first_dim] in num_channels: return ChannelDimension.FIRST elif image.shape[last_dim] in num_channels: return ChannelDimension.LAST raise ValueError("Unable to infer channel dimension format") def get_channel_dimension_axis( image: np.ndarray, input_data_format: Optional[Union[ChannelDimension, str]] = None ) -> int: """ Returns the channel dimension axis of the image. Args: image (`np.ndarray`): The image to get the channel dimension axis of. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the image. If `None`, will infer the channel dimension from the image. Returns: The channel dimension axis of the image. """ if input_data_format is None: input_data_format = infer_channel_dimension_format(image) if input_data_format == ChannelDimension.FIRST: return image.ndim - 3 elif input_data_format == ChannelDimension.LAST: return image.ndim - 1 raise ValueError(f"Unsupported data format: {input_data_format}") def get_image_size(image: np.ndarray, channel_dim: Optional[ChannelDimension] = None) -> tuple[int, int]: """ Returns the (height, width) dimensions of the image. 
Args: image (`np.ndarray`): The image to get the dimensions of. channel_dim (`ChannelDimension`, *optional*): Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image. Returns: A tuple of the image's height and width. """ if channel_dim is None: channel_dim = infer_channel_dimension_format(image) if channel_dim == ChannelDimension.FIRST: return image.shape[-2], image.shape[-1] elif channel_dim == ChannelDimension.LAST: return image.shape[-3], image.shape[-2] else: raise ValueError(f"Unsupported data format: {channel_dim}") def get_image_size_for_max_height_width( image_size: tuple[int, int], max_height: int, max_width: int, ) -> tuple[int, int]: """ Computes the output image size given the input image and the maximum allowed height and width. Keep aspect ratio. Important, even if image_height < max_height and image_width < max_width, the image will be resized to at least one of the edges be equal to max_height or max_width. For example: - input_size: (100, 200), max_height: 50, max_width: 50 -> output_size: (25, 50) - input_size: (100, 200), max_height: 200, max_width: 500 -> output_size: (200, 400) Args: image_size (`tuple[int, int]`): The image to resize. max_height (`int`): The maximum allowed height. max_width (`int`): The maximum allowed width. 
""" height, width = image_size height_scale = max_height / height width_scale = max_width / width min_scale = min(height_scale, width_scale) new_height = int(height * min_scale) new_width = int(width * min_scale) return new_height, new_width def is_valid_annotation_coco_detection(annotation: dict[str, Union[list, tuple]]) -> bool: if ( isinstance(annotation, dict) and "image_id" in annotation and "annotations" in annotation and isinstance(annotation["annotations"], (list, tuple)) and ( # an image can have no annotations len(annotation["annotations"]) == 0 or isinstance(annotation["annotations"][0], dict) ) ): return True return False def is_valid_annotation_coco_panoptic(annotation: dict[str, Union[list, tuple]]) -> bool: if ( isinstance(annotation, dict) and "image_id" in annotation and "segments_info" in annotation and "file_name" in annotation and isinstance(annotation["segments_info"], (list, tuple)) and ( # an image can have no segments len(annotation["segments_info"]) == 0 or isinstance(annotation["segments_info"][0], dict) ) ): return True return False def valid_coco_detection_annotations(annotations: Iterable[dict[str, Union[list, tuple]]]) -> bool: return all(is_valid_annotation_coco_detection(ann) for ann in annotations) def valid_coco_panoptic_annotations(annotations: Iterable[dict[str, Union[list, tuple]]]) -> bool: return all(is_valid_annotation_coco_panoptic(ann) for ann in annotations) def load_image(image: Union[str, "PIL.Image.Image"], timeout: Optional[float] = None) -> "PIL.Image.Image": """ Loads `image` to a PIL Image. Args: image (`str` or `PIL.Image.Image`): The image to convert to the PIL Image format. timeout (`float`, *optional*): The timeout value in seconds for the URL request. Returns: `PIL.Image.Image`: A PIL Image. 
""" requires_backends(load_image, ["vision"]) if isinstance(image, str): if image.startswith("http://") or image.startswith("https://"): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png image = PIL.Image.open(BytesIO(httpx.get(image, timeout=timeout, follow_redirects=True).content)) elif os.path.isfile(image): image = PIL.Image.open(image) else: if image.startswith("data:image/"): image = image.split(",")[1] # Try to load as base64 try: b64 = base64.decodebytes(image.encode()) image = PIL.Image.open(BytesIO(b64)) except Exception as e: raise ValueError( f"Incorrect image source. Must be a valid URL starting with `http://` or `https://`, a valid path to an image file, or a base64 encoded string. Got {image}. Failed with {e}" ) elif not isinstance(image, PIL.Image.Image): raise TypeError( "Incorrect format used for image. Should be an url linking to an image, a base64 string, a local path, or a PIL image." ) image = PIL.ImageOps.exif_transpose(image) image = image.convert("RGB") return image def load_images( images: Union[list, tuple, str, "PIL.Image.Image"], timeout: Optional[float] = None ) -> Union["PIL.Image.Image", list["PIL.Image.Image"], list[list["PIL.Image.Image"]]]: """Loads images, handling different levels of nesting. Args: images: A single image, a list of images, or a list of lists of images to load. timeout: Timeout for loading images. Returns: A single image, a list of images, a list of lists of images. 
""" if isinstance(images, (list, tuple)): if len(images) and isinstance(images[0], (list, tuple)): return [[load_image(image, timeout=timeout) for image in image_group] for image_group in images] else: return [load_image(image, timeout=timeout) for image in images] else: return load_image(images, timeout=timeout) def validate_preprocess_arguments( do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_pad: Optional[bool] = None, pad_size: Optional[Union[dict[str, int], int]] = None, do_center_crop: Optional[bool] = None, crop_size: Optional[dict[str, int]] = None, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: Optional["PILImageResampling"] = None, interpolation: Optional["InterpolationMode"] = None, ): """ Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method. Raises `ValueError` if arguments incompatibility is caught. Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`, sometimes `size_divisibility`, and sometimes `size`. New models and processors added should follow existing arguments when possible. """ if do_rescale and rescale_factor is None: raise ValueError("`rescale_factor` must be specified if `do_rescale` is `True`.") if do_pad and pad_size is None: # Processors pad images using different args depending on the model, so the below check is pointless # but we keep it for BC for now. TODO: remove in v5 # Usually padding can be called with: # - "pad_size/size" if we're padding to specific values # - "size_divisor" if we're padding to any value divisible by X # - "None" if we're padding to the maximum size image in batch raise ValueError( "Depending on the model, `size_divisor` or `pad_size` or `size` must be specified if `do_pad` is `True`." 
) if do_normalize and (image_mean is None or image_std is None): raise ValueError("`image_mean` and `image_std` must both be specified if `do_normalize` is `True`.") if do_center_crop and crop_size is None: raise ValueError("`crop_size` must be specified if `do_center_crop` is `True`.") if interpolation is not None and resample is not None: raise ValueError( "Only one of `interpolation` and `resample` should be specified, depending on image processor type." ) if do_resize and not (size is not None and (resample is not None or interpolation is not None)): raise ValueError("`size` and `resample/interpolation` must be specified if `do_resize` is `True`.")
ImageType
python
Pylons__pyramid
src/pyramid/i18n.py
{ "start": 8435, "end": 13918 }
class ____(gettext.GNUTranslations): """An extended translation catalog class (ripped off from Babel)""" DEFAULT_DOMAIN = 'messages' def __init__(self, fileobj=None, domain=DEFAULT_DOMAIN): """Initialize the translations catalog. :param fileobj: the file-like object the translation should be read from """ # germanic plural by default; self.plural will be overwritten by # GNUTranslations._parse (called as a side effect if fileobj is # passed to GNUTranslations.__init__) with a "real" self.plural for # this domain; see https://github.com/Pylons/pyramid/issues/235 # It is only overridden the first time a new message file is found # for a given domain, so all message files must have matching plural # rules if they are in the same domain. We keep track of if we have # overridden so we can special case the default domain, which is always # instantiated before a message file is read. # See also https://github.com/Pylons/pyramid/pull/2102 self.plural = DEFAULT_PLURAL gettext.GNUTranslations.__init__(self, fp=fileobj) self.files = list(filter(None, [getattr(fileobj, 'name', None)])) self.domain = domain self._domains = {} @classmethod def load(cls, dirname=None, locales=None, domain=DEFAULT_DOMAIN): """Load translations from the given directory. 
:param dirname: the directory containing the ``MO`` files :param locales: the list of locales in order of preference (items in this list can be either `Locale` objects or locale strings) :param domain: the message domain :return: the loaded catalog, or a ``NullTranslations`` instance if no matching translations were found :rtype: `Translations` """ if locales is not None: if not isinstance(locales, (list, tuple)): locales = [locales] locales = [str(locale) for locale in locales] if not domain: domain = cls.DEFAULT_DOMAIN filename = gettext.find(domain, dirname, locales) if not filename: return gettext.NullTranslations() with open(filename, 'rb') as fp: return cls(fileobj=fp, domain=domain) def __repr__(self): return '<{}: "{}">'.format( type(self).__name__, self._info.get('project-id-version'), ) def add(self, translations, merge=True): """Add the given translations to the catalog. If the domain of the translations is different than that of the current catalog, they are added as a catalog that is only accessible by the various ``d*gettext`` functions. :param translations: the `Translations` instance with the messages to add :param merge: whether translations for message domains that have already been added should be merged with the existing translations :return: the `Translations` instance (``self``) so that `merge` calls can be easily chained :rtype: `Translations` """ domain = getattr(translations, 'domain', self.DEFAULT_DOMAIN) if domain == self.DEFAULT_DOMAIN and self.plural is DEFAULT_PLURAL: self.plural = translations.plural if merge and domain == self.domain: return self.merge(translations) existing = self._domains.get(domain) if merge and existing is not None: existing.merge(translations) else: translations.add_fallback(self) self._domains[domain] = translations return self def merge(self, translations): """Merge the given translations into the catalog. 
Message translations in the specified catalog override any messages with the same identifier in the existing catalog. :param translations: the `Translations` instance with the messages to merge :return: the `Translations` instance (``self``) so that `merge` calls can be easily chained :rtype: `Translations` """ if isinstance(translations, gettext.GNUTranslations): self._catalog.update(translations._catalog) if isinstance(translations, Translations): self.files.extend(translations.files) return self def dgettext(self, domain, message): """Like ``gettext()``, but look the message up in the specified domain. """ return self._domains.get(domain, self).gettext(message) def dugettext(self, domain, message): """Like ``ugettext()``, but look the message up in the specified domain. """ return self._domains.get(domain, self).gettext(message) def dngettext(self, domain, singular, plural, num): """Like ``ngettext()``, but look the message up in the specified domain. """ return self._domains.get(domain, self).ngettext(singular, plural, num) def dungettext(self, domain, singular, plural, num): """Like ``ungettext()`` but look the message up in the specified domain. """ return self._domains.get(domain, self).ngettext(singular, plural, num)
Translations
python
Textualize__textual
src/textual/_parser.py
{ "start": 268, "end": 323 }
class ____(ParseError): """End of Stream."""
ParseEOF
python
pytorch__pytorch
benchmarks/instruction_counts/execution/runner.py
{ "start": 409, "end": 707 }
class ____(Exception): """Raised in the main process when a worker failure is detected.""" def __init__(self, cmd: str, wrapped_trace: Optional[str] = None) -> None: self.cmd: str = cmd self.wrapped_trace: Optional[str] = wrapped_trace super().__init__()
WorkerFailed
python
spack__spack
lib/spack/spack/llnl/util/lang.py
{ "start": 28906, "end": 29770 }
class ____(collections.abc.MutableSequence): """Base class that behaves like a list, just with a different type. Client code can inherit from this base class:: class Foo(TypedMutableSequence): pass and later perform checks based on types:: if isinstance(l, Foo): # do something """ def __init__(self, iterable): self.data = list(iterable) def __getitem__(self, item): return self.data[item] def __setitem__(self, key, value): self.data[key] = value def __delitem__(self, key): del self.data[key] def __len__(self): return len(self.data) def insert(self, index, item): self.data.insert(index, item) def __repr__(self): return repr(self.data) def __str__(self): return str(self.data)
TypedMutableSequence
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/mssql/base.py
{ "start": 45597, "end": 45924 }
class ____(sqltypes.Text): """MSSQL XML type. This is a placeholder type for reflection purposes that does not include any Python-side datatype support. It also does not currently support additional arguments, such as "CONTENT", "DOCUMENT", "xml_schema_collection". """ __visit_name__ = "XML"
XML
python
keras-team__keras
keras/src/layers/merging/maximum.py
{ "start": 166, "end": 2142 }
class ____(Merge): """Computes element-wise maximum on a list of inputs. It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). Examples: >>> input_shape = (2, 3, 4) >>> x1 = np.random.rand(*input_shape) >>> x2 = np.random.rand(*input_shape) >>> y = keras.layers.Maximum()([x1, x2]) Usage in a Keras model: >>> input1 = keras.layers.Input(shape=(16,)) >>> x1 = keras.layers.Dense(8, activation='relu')(input1) >>> input2 = keras.layers.Input(shape=(32,)) >>> x2 = keras.layers.Dense(8, activation='relu')(input2) >>> # equivalent to `y = keras.layers.maximum([x1, x2])` >>> y = keras.layers.Maximum()([x1, x2]) >>> out = keras.layers.Dense(4)(y) >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) """ def _merge_function(self, inputs): return self._apply_merge_op_and_or_mask(ops.maximum, inputs) @keras_export("keras.layers.maximum") def maximum(inputs, **kwargs): """Functional interface to the `keras.layers.Maximum` layer. Args: inputs: A list of input tensors , all of the same shape. **kwargs: Standard layer keyword arguments. Returns: A tensor as the element-wise product of the inputs with the same shape as the inputs. Examples: >>> input_shape = (2, 3, 4) >>> x1 = np.random.rand(*input_shape) >>> x2 = np.random.rand(*input_shape) >>> y = keras.layers.maximum([x1, x2]) Usage in a Keras model: >>> input1 = keras.layers.Input(shape=(16,)) >>> x1 = keras.layers.Dense(8, activation='relu')(input1) >>> input2 = keras.layers.Input(shape=(32,)) >>> x2 = keras.layers.Dense(8, activation='relu')(input2) >>> y = keras.layers.maximum([x1, x2]) >>> out = keras.layers.Dense(4)(y) >>> model = keras.models.Model(inputs=[input1, input2], outputs=out) """ return Maximum(**kwargs)(inputs)
Maximum
python
pydantic__pydantic
pydantic/_internal/_generics.py
{ "start": 3377, "end": 22976 }
class ____(TypedDict): origin: type[BaseModel] | None # analogous to typing._GenericAlias.__origin__ args: tuple[Any, ...] # analogous to typing._GenericAlias.__args__ parameters: tuple[TypeVar, ...] # analogous to typing.Generic.__parameters__ def create_generic_submodel( model_name: str, origin: type[BaseModel], args: tuple[Any, ...], params: tuple[Any, ...] ) -> type[BaseModel]: """Dynamically create a submodel of a provided (generic) BaseModel. This is used when producing concrete parametrizations of generic models. This function only *creates* the new subclass; the schema/validators/serialization must be updated to reflect a concrete parametrization elsewhere. Args: model_name: The name of the newly created model. origin: The base class for the new model to inherit from. args: A tuple of generic metadata arguments. params: A tuple of generic metadata parameters. Returns: The created submodel. """ namespace: dict[str, Any] = {'__module__': origin.__module__} bases = (origin,) meta, ns, kwds = prepare_class(model_name, bases) namespace.update(ns) created_model = meta( model_name, bases, namespace, __pydantic_generic_metadata__={ 'origin': origin, 'args': args, 'parameters': params, }, __pydantic_reset_parent_namespace__=False, **kwds, ) model_module, called_globally = _get_caller_frame_info(depth=3) if called_globally: # create global reference and therefore allow pickling object_by_reference = None reference_name = model_name reference_module_globals = sys.modules[created_model.__module__].__dict__ while object_by_reference is not created_model: object_by_reference = reference_module_globals.setdefault(reference_name, created_model) reference_name += '_' return created_model def _get_caller_frame_info(depth: int = 2) -> tuple[str | None, bool]: """Used inside a function to check whether it was called globally. Args: depth: The depth to get the frame. Returns: A tuple contains `module_name` and `called_globally`. 
Raises: RuntimeError: If the function is not called inside a function. """ try: previous_caller_frame = sys._getframe(depth) except ValueError as e: raise RuntimeError('This function must be used inside another function') from e except AttributeError: # sys module does not have _getframe function, so there's nothing we can do about it return None, False frame_globals = previous_caller_frame.f_globals return frame_globals.get('__name__'), previous_caller_frame.f_locals is frame_globals DictValues: type[Any] = {}.values().__class__ def iter_contained_typevars(v: Any) -> Iterator[TypeVar]: """Recursively iterate through all subtypes and type args of `v` and yield any typevars that are found. This is inspired as an alternative to directly accessing the `__parameters__` attribute of a GenericAlias, since __parameters__ of (nested) generic BaseModel subclasses won't show up in that list. """ if isinstance(v, TypeVar): yield v elif is_model_class(v): yield from v.__pydantic_generic_metadata__['parameters'] elif isinstance(v, (DictValues, list)): for var in v: yield from iter_contained_typevars(var) else: args = get_args(v) for arg in args: yield from iter_contained_typevars(arg) def get_args(v: Any) -> Any: pydantic_generic_metadata: PydanticGenericMetadata | None = getattr(v, '__pydantic_generic_metadata__', None) if pydantic_generic_metadata: return pydantic_generic_metadata.get('args') return typing_extensions.get_args(v) def get_origin(v: Any) -> Any: pydantic_generic_metadata: PydanticGenericMetadata | None = getattr(v, '__pydantic_generic_metadata__', None) if pydantic_generic_metadata: return pydantic_generic_metadata.get('origin') return typing_extensions.get_origin(v) def get_standard_typevars_map(cls: Any) -> dict[TypeVar, Any] | None: """Package a generic type's typevars and parametrization (if present) into a dictionary compatible with the `replace_types` function. Specifically, this works with standard typing generics and typing._GenericAlias. 
""" origin = get_origin(cls) if origin is None: return None if not hasattr(origin, '__parameters__'): return None # In this case, we know that cls is a _GenericAlias, and origin is the generic type # So it is safe to access cls.__args__ and origin.__parameters__ args: tuple[Any, ...] = cls.__args__ # type: ignore parameters: tuple[TypeVar, ...] = origin.__parameters__ return dict(zip(parameters, args)) def get_model_typevars_map(cls: type[BaseModel]) -> dict[TypeVar, Any]: """Package a generic BaseModel's typevars and concrete parametrization (if present) into a dictionary compatible with the `replace_types` function. Since BaseModel.__class_getitem__ does not produce a typing._GenericAlias, and the BaseModel generic info is stored in the __pydantic_generic_metadata__ attribute, we need special handling here. """ # TODO: This could be unified with `get_standard_typevars_map` if we stored the generic metadata # in the __origin__, __args__, and __parameters__ attributes of the model. generic_metadata = cls.__pydantic_generic_metadata__ origin = generic_metadata['origin'] args = generic_metadata['args'] if not args: # No need to go into `iter_contained_typevars`: return {} return dict(zip(iter_contained_typevars(origin), args)) def replace_types(type_: Any, type_map: Mapping[TypeVar, Any] | None) -> Any: """Return type with all occurrences of `type_map` keys recursively replaced with their values. Args: type_: The class or generic alias. type_map: Mapping from `TypeVar` instance to concrete types. Returns: A new type representing the basic structure of `type_` with all `typevar_map` keys recursively replaced. 
Example: ```python from typing import Union from pydantic._internal._generics import replace_types replace_types(tuple[str, Union[list[str], float]], {str: int}) #> tuple[int, Union[list[int], float]] ``` """ if not type_map: return type_ type_args = get_args(type_) origin_type = get_origin(type_) if typing_objects.is_annotated(origin_type): annotated_type, *annotations = type_args annotated_type = replace_types(annotated_type, type_map) # TODO remove parentheses when we drop support for Python 3.10: return Annotated[(annotated_type, *annotations)] # Having type args is a good indicator that this is a typing special form # instance or a generic alias of some sort. if type_args: resolved_type_args = tuple(replace_types(arg, type_map) for arg in type_args) if all_identical(type_args, resolved_type_args): # If all arguments are the same, there is no need to modify the # type or create a new object at all return type_ if ( origin_type is not None and isinstance(type_, _typing_extra.typing_base) and not isinstance(origin_type, _typing_extra.typing_base) and getattr(type_, '_name', None) is not None ): # In python < 3.9 generic aliases don't exist so any of these like `list`, # `type` or `collections.abc.Callable` need to be translated. # See: https://www.python.org/dev/peps/pep-0585 origin_type = getattr(typing, type_._name) assert origin_type is not None if is_union_origin(origin_type): if any(typing_objects.is_any(arg) for arg in resolved_type_args): # `Any | T` ~ `Any`: resolved_type_args = (Any,) # `Never | T` ~ `T`: resolved_type_args = tuple( arg for arg in resolved_type_args if not (typing_objects.is_noreturn(arg) or typing_objects.is_never(arg)) ) # PEP-604 syntax (Ex.: list | str) is represented with a types.UnionType object that does not have __getitem__. # We also cannot use isinstance() since we have to compare types. 
if sys.version_info >= (3, 10) and origin_type is types.UnionType: return reduce(operator.or_, resolved_type_args) # NotRequired[T] and Required[T] don't support tuple type resolved_type_args, hence the condition below return origin_type[resolved_type_args[0] if len(resolved_type_args) == 1 else resolved_type_args] # We handle pydantic generic models separately as they don't have the same # semantics as "typing" classes or generic aliases if not origin_type and is_model_class(type_): parameters = type_.__pydantic_generic_metadata__['parameters'] if not parameters: return type_ resolved_type_args = tuple(replace_types(t, type_map) for t in parameters) if all_identical(parameters, resolved_type_args): return type_ return type_[resolved_type_args] # Handle special case for typehints that can have lists as arguments. # `typing.Callable[[int, str], int]` is an example for this. if isinstance(type_, list): resolved_list = [replace_types(element, type_map) for element in type_] if all_identical(type_, resolved_list): return type_ return resolved_list # If all else fails, we try to resolve the type directly and otherwise just # return the input with no modifications. return type_map.get(type_, type_) def map_generic_model_arguments(cls: type[BaseModel], args: tuple[Any, ...]) -> dict[TypeVar, Any]: """Return a mapping between the parameters of a generic model and the provided arguments during parameterization. Raises: TypeError: If the number of arguments does not match the parameters (i.e. if providing too few or too many arguments). Example: ```python {test="skip" lint="skip"} class Model[T, U, V = int](BaseModel): ... 
map_generic_model_arguments(Model, (str, bytes)) #> {T: str, U: bytes, V: int} map_generic_model_arguments(Model, (str,)) #> TypeError: Too few arguments for <class '__main__.Model'>; actual 1, expected at least 2 map_generic_model_arguments(Model, (str, bytes, int, complex)) #> TypeError: Too many arguments for <class '__main__.Model'>; actual 4, expected 3 ``` Note: This function is analogous to the private `typing._check_generic_specialization` function. """ parameters = cls.__pydantic_generic_metadata__['parameters'] expected_len = len(parameters) typevars_map: dict[TypeVar, Any] = {} _missing = object() for parameter, argument in zip_longest(parameters, args, fillvalue=_missing): if parameter is _missing: raise TypeError(f'Too many arguments for {cls}; actual {len(args)}, expected {expected_len}') if argument is _missing: param = cast(TypeVar, parameter) try: has_default = param.has_default() # pyright: ignore[reportAttributeAccessIssue] except AttributeError: # Happens if using `typing.TypeVar` (and not `typing_extensions`) on Python < 3.13. has_default = False if has_default: # The default might refer to other type parameters. For an example, see: # https://typing.python.org/en/latest/spec/generics.html#type-parameters-as-parameters-to-generics typevars_map[param] = replace_types(param.__default__, typevars_map) # pyright: ignore[reportAttributeAccessIssue] else: expected_len -= sum(hasattr(p, 'has_default') and p.has_default() for p in parameters) # pyright: ignore[reportAttributeAccessIssue] raise TypeError(f'Too few arguments for {cls}; actual {len(args)}, expected at least {expected_len}') else: param = cast(TypeVar, parameter) typevars_map[param] = argument return typevars_map _generic_recursion_cache: ContextVar[set[str] | None] = ContextVar('_generic_recursion_cache', default=None) @contextmanager def generic_recursion_self_type( origin: type[BaseModel], args: tuple[Any, ...] 
) -> Iterator[PydanticRecursiveRef | None]: """This contextmanager should be placed around the recursive calls used to build a generic type, and accept as arguments the generic origin type and the type arguments being passed to it. If the same origin and arguments are observed twice, it implies that a self-reference placeholder can be used while building the core schema, and will produce a schema_ref that will be valid in the final parent schema. """ previously_seen_type_refs = _generic_recursion_cache.get() if previously_seen_type_refs is None: previously_seen_type_refs = set() token = _generic_recursion_cache.set(previously_seen_type_refs) else: token = None try: type_ref = get_type_ref(origin, args_override=args) if type_ref in previously_seen_type_refs: self_type = PydanticRecursiveRef(type_ref=type_ref) yield self_type else: previously_seen_type_refs.add(type_ref) yield previously_seen_type_refs.remove(type_ref) finally: if token: _generic_recursion_cache.reset(token) def recursively_defined_type_refs() -> set[str]: visited = _generic_recursion_cache.get() if not visited: return set() # not in a generic recursion, so there are no types return visited.copy() # don't allow modifications def get_cached_generic_type_early(parent: type[BaseModel], typevar_values: Any) -> type[BaseModel] | None: """The use of a two-stage cache lookup approach was necessary to have the highest performance possible for repeated calls to `__class_getitem__` on generic types (which may happen in tighter loops during runtime), while still ensuring that certain alternative parametrizations ultimately resolve to the same type. As a concrete example, this approach was necessary to make Model[List[T]][int] equal to Model[List[int]]. 
The approach could be modified to not use two different cache keys at different points, but the _early_cache_key is optimized to be as quick to compute as possible (for repeated-access speed), and the _late_cache_key is optimized to be as "correct" as possible, so that two types that will ultimately be the same after resolving the type arguments will always produce cache hits. If we wanted to move to only using a single cache key per type, we would either need to always use the slower/more computationally intensive logic associated with _late_cache_key, or would need to accept that Model[List[T]][int] is a different type than Model[List[T]][int]. Because we rely on subclass relationships during validation, I think it is worthwhile to ensure that types that are functionally equivalent are actually equal. """ return _GENERIC_TYPES_CACHE.get(_early_cache_key(parent, typevar_values)) def get_cached_generic_type_late( parent: type[BaseModel], typevar_values: Any, origin: type[BaseModel], args: tuple[Any, ...] ) -> type[BaseModel] | None: """See the docstring of `get_cached_generic_type_early` for more information about the two-stage cache lookup.""" cached = _GENERIC_TYPES_CACHE.get(_late_cache_key(origin, args, typevar_values)) if cached is not None: set_cached_generic_type(parent, typevar_values, cached, origin, args) return cached def set_cached_generic_type( parent: type[BaseModel], typevar_values: tuple[Any, ...], type_: type[BaseModel], origin: type[BaseModel] | None = None, args: tuple[Any, ...] | None = None, ) -> None: """See the docstring of `get_cached_generic_type_early` for more information about why items are cached with two different keys. 
""" _GENERIC_TYPES_CACHE[_early_cache_key(parent, typevar_values)] = type_ if len(typevar_values) == 1: _GENERIC_TYPES_CACHE[_early_cache_key(parent, typevar_values[0])] = type_ if origin and args: _GENERIC_TYPES_CACHE[_late_cache_key(origin, args, typevar_values)] = type_ def _union_orderings_key(typevar_values: Any) -> Any: """This is intended to help differentiate between Union types with the same arguments in different order. Thanks to caching internal to the `typing` module, it is not possible to distinguish between List[Union[int, float]] and List[Union[float, int]] (and similarly for other "parent" origins besides List) because `typing` considers Union[int, float] to be equal to Union[float, int]. However, you _can_ distinguish between (top-level) Union[int, float] vs. Union[float, int]. Because we parse items as the first Union type that is successful, we get slightly more consistent behavior if we make an effort to distinguish the ordering of items in a union. It would be best if we could _always_ get the exact-correct order of items in the union, but that would require a change to the `typing` module itself. (See https://github.com/python/cpython/issues/86483 for reference.) """ if isinstance(typevar_values, tuple): return tuple(_union_orderings_key(value) for value in typevar_values) elif typing_objects.is_union(typing_extensions.get_origin(typevar_values)): return get_args(typevar_values) else: return () def _early_cache_key(cls: type[BaseModel], typevar_values: Any) -> GenericTypesCacheKey: """This is intended for minimal computational overhead during lookups of cached types. Note that this is overly simplistic, and it's possible that two different cls/typevar_values inputs would ultimately result in the same type being created in BaseModel.__class_getitem__. 
To handle this, we have a fallback _late_cache_key that is checked later if the _early_cache_key lookup fails, and should result in a cache hit _precisely_ when the inputs to __class_getitem__ would result in the same type. """ return cls, typevar_values, _union_orderings_key(typevar_values) def _late_cache_key(origin: type[BaseModel], args: tuple[Any, ...], typevar_values: Any) -> GenericTypesCacheKey: """This is intended for use later in the process of creating a new type, when we have more information about the exact args that will be passed. If it turns out that a different set of inputs to __class_getitem__ resulted in the same inputs to the generic type creation process, we can still return the cached type, and update the cache with the _early_cache_key as well. """ # The _union_orderings_key is placed at the start here to ensure there cannot be a collision with an # _early_cache_key, as that function will always produce a BaseModel subclass as the first item in the key, # whereas this function will always produce a tuple as the first item in the key. return _union_orderings_key(typevar_values), origin, args
PydanticGenericMetadata
python
gevent__gevent
src/gevent/tests/test__threadpool.py
{ "start": 20069, "end": 21383 }
class ____(greentest.TestCase): def test_exception_in_on_async_doesnt_crash(self): # Issue 1482. An FFI-based loop could crash the whole process # by dereferencing a handle after it was closed. called = [] class MyException(Exception): pass def bad_when_ready(): called.append(1) raise MyException tr = gevent.threadpool.ThreadResult(None, gevent.get_hub(), bad_when_ready) def wake(): called.append(1) tr.set(42) gevent.spawn(wake).get() # Spin the loop a few times to make sure we run the callbacks. # If we neglect to spin, we don't trigger the bug. # If error handling is correct, the exception raised from the callback # will be surfaced in the main greenlet. On windows, it can sometimes take # more than one spin for some reason; if we don't catch it here, then # some other test is likely to die unexpectedly with MyException. with self.assertRaises(MyException): for _ in range(5): gevent.sleep(0.001) self.assertEqual(called, [1, 1]) # But value was cleared in a finally block self.assertIsNone(tr.value) self.assertIsNotNone(tr.receiver)
TestThreadResult
python
modin-project__modin
asv_bench/benchmarks/benchmarks.py
{ "start": 35825, "end": 36069 }
class ____(BaseCategories): params = [get_benchmark_shapes("TimeSetCategories")] param_names = ["shape"] def time_set_categories(self, shape): execute(self.ts.cat.set_categories(self.ts.cat.categories[::2]))
TimeSetCategories
python
bokeh__bokeh
src/bokeh/core/property/numeric.py
{ "start": 2115, "end": 2659 }
class ____(SingleParameterizedProperty[T]): """ A property accepting a value of some other type while having undefined default. """ def __init__(self, type_param: TypeOrInst[Property[T]], *, default: Init[T] = Intrinsic, help: str | None = None) -> None: super().__init__(type_param, default=default, help=help) def validate(self, value: Any, detail: bool = True) -> None: super().validate(value, detail) if not (0 < value): raise ValueError(f"expected a positive number, got {value!r}")
Positive
python
apache__airflow
providers/standard/tests/unit/standard/sensors/test_time_delta.py
{ "start": 5274, "end": 8641 }
class ____: def setup_method(self): self.dagbag = DagBag(dag_folder=DEV_NULL, include_examples=True) self.args = {"owner": "airflow", "start_date": DEFAULT_DATE} self.dag = DAG(TEST_DAG_ID, schedule=timedelta(days=1), default_args=self.args) @pytest.mark.parametrize( "should_defer", [False, True], ) def test_timedelta_sensor(self, mocker, should_defer): defer_mock = mocker.patch(DEFER_PATH) delta = timedelta(hours=1) with pytest.warns(AirflowProviderDeprecationWarning): op = TimeDeltaSensorAsync(task_id="timedelta_sensor_check", delta=delta, dag=self.dag) if should_defer: data_interval_end = pendulum.now("UTC").add(hours=1) else: data_interval_end = pendulum.now("UTC").replace(microsecond=0, second=0, minute=0).add(hours=-1) op.execute({"data_interval_end": data_interval_end}) if should_defer: defer_mock.assert_called_once() else: defer_mock.assert_not_called() @pytest.mark.parametrize( "should_defer", [False, True], ) def test_wait_sensor(self, mocker, should_defer): defer_mock = mocker.patch(DEFER_PATH) sleep_mock = mocker.patch("airflow.providers.standard.sensors.time_delta.sleep") wait_time = timedelta(seconds=30) op = WaitSensor( task_id="wait_sensor_check", time_to_wait=wait_time, dag=self.dag, deferrable=should_defer ) with time_machine.travel(pendulum.datetime(year=2024, month=8, day=1, tz="UTC"), tick=False): op.execute({}) if should_defer: defer_mock.assert_called_once() else: defer_mock.assert_not_called() sleep_mock.assert_called_once_with(30) @pytest.mark.parametrize( ("run_after", "interval_end"), [ (timezone.utcnow() + timedelta(days=1), timezone.utcnow() + timedelta(days=2)), (timezone.utcnow() + timedelta(days=1), None), ], ) def test_timedelta_sensor_async_run_after_vs_interval(self, run_after, interval_end, dag_maker): """Interval end should be used as base time when present else run_after""" if not AIRFLOW_V_3_0_PLUS and not interval_end: pytest.skip("not applicable") context = {} if interval_end: context["data_interval_end"] = interval_end with 
dag_maker() as dag: ... kwargs = {} if AIRFLOW_V_3_0_PLUS: from airflow.utils.types import DagRunTriggeredByType kwargs.update(triggered_by=DagRunTriggeredByType.TEST, run_after=run_after) dr = dag_maker.create_dagrun( run_id="abcrhroceuh", run_type=DagRunType.MANUAL, state=None, **kwargs, ) context.update(dag_run=dr) delta = timedelta(seconds=1) with pytest.warns(AirflowProviderDeprecationWarning): op = TimeDeltaSensorAsync(task_id="wait_sensor_check", delta=delta, dag=dag) base_time = interval_end or run_after expected_time = base_time + delta with pytest.raises(TaskDeferred) as caught: op.execute(context) assert caught.value.trigger.moment == expected_time
TestTimeDeltaSensorAsync
python
plotly__plotly.py
plotly/io/_base_renderers.py
{ "start": 13031, "end": 13648 }
class ____(HtmlRenderer): """ Renderer to display interactive figures in Google Colab Notebooks. This renderer is enabled by default when running in a Colab notebook. mime type: 'text/html' """ def __init__( self, config=None, auto_play=False, post_script=None, animation_opts=None ): super(ColabRenderer, self).__init__( connected=True, full_html=True, global_init=False, config=config, auto_play=auto_play, post_script=post_script, animation_opts=animation_opts, )
ColabRenderer
python
matplotlib__matplotlib
lib/matplotlib/tests/test_units.py
{ "start": 480, "end": 10483 }
class ____: def __init__(self, data, units): self.magnitude = data self.units = units def to(self, new_units): factors = {('hours', 'seconds'): 3600, ('minutes', 'hours'): 1 / 60, ('minutes', 'seconds'): 60, ('feet', 'miles'): 1 / 5280., ('feet', 'inches'): 12, ('miles', 'inches'): 12 * 5280} if self.units != new_units: mult = factors[self.units, new_units] return Quantity(mult * self.magnitude, new_units) else: return Quantity(self.magnitude, self.units) def __copy__(self): return Quantity(self.magnitude, self.units) def __getattr__(self, attr): return getattr(self.magnitude, attr) def __getitem__(self, item): if np.iterable(self.magnitude): return Quantity(self.magnitude[item], self.units) else: return Quantity(self.magnitude, self.units) def __array__(self): return np.asarray(self.magnitude) @pytest.fixture def quantity_converter(): # Create an instance of the conversion interface and # mock so we can check methods called qc = munits.ConversionInterface() def convert(value, unit, axis): if hasattr(value, 'units'): return value.to(unit).magnitude elif np.iterable(value): try: return [v.to(unit).magnitude for v in value] except AttributeError: return [Quantity(v, axis.get_units()).to(unit).magnitude for v in value] else: return Quantity(value, axis.get_units()).to(unit).magnitude def default_units(value, axis): if hasattr(value, 'units'): return value.units elif np.iterable(value): for v in value: if hasattr(v, 'units'): return v.units return None qc.convert = MagicMock(side_effect=convert) qc.axisinfo = MagicMock(side_effect=lambda u, a: munits.AxisInfo(label=u, default_limits=(0, 100))) qc.default_units = MagicMock(side_effect=default_units) return qc # Tests that the conversion machinery works properly for classes that # work as a facade over numpy arrays (like pint) @image_comparison(['plot_pint.png'], style='mpl20', tol=0 if platform.machine() == 'x86_64' else 0.03) def test_numpy_facade(quantity_converter): # use former defaults to match existing baseline 
image plt.rcParams['axes.formatter.limits'] = -7, 7 # Register the class munits.registry[Quantity] = quantity_converter # Simple test y = Quantity(np.linspace(0, 30), 'miles') x = Quantity(np.linspace(0, 5), 'hours') fig, ax = plt.subplots() fig.subplots_adjust(left=0.15) # Make space for label ax.plot(x, y, 'tab:blue') ax.axhline(Quantity(26400, 'feet'), color='tab:red') ax.axvline(Quantity(120, 'minutes'), color='tab:green') ax.yaxis.set_units('inches') ax.xaxis.set_units('seconds') assert quantity_converter.convert.called assert quantity_converter.axisinfo.called assert quantity_converter.default_units.called # Tests gh-8908 @image_comparison(['plot_masked_units.png'], remove_text=True, style='mpl20', tol=0 if platform.machine() == 'x86_64' else 0.02) def test_plot_masked_units(): data = np.linspace(-5, 5) data_masked = np.ma.array(data, mask=(data > -2) & (data < 2)) data_masked_units = Quantity(data_masked, 'meters') fig, ax = plt.subplots() ax.plot(data_masked_units) def test_empty_set_limits_with_units(quantity_converter): # Register the class munits.registry[Quantity] = quantity_converter fig, ax = plt.subplots() ax.set_xlim(Quantity(-1, 'meters'), Quantity(6, 'meters')) ax.set_ylim(Quantity(-1, 'hours'), Quantity(16, 'hours')) @image_comparison(['jpl_bar_units.png'], savefig_kwarg={'dpi': 120}, style='mpl20') def test_jpl_bar_units(): import matplotlib.testing.jpl_units as units units.register() day = units.Duration("ET", 24.0 * 60.0 * 60.0) x = [0 * units.km, 1 * units.km, 2 * units.km] w = [1 * day, 2 * day, 3 * day] b = units.Epoch("ET", dt=datetime(2009, 4, 26)) fig, ax = plt.subplots() ax.bar(x, w, bottom=b) ax.set_ylim([b - 1 * day, b + w[-1] + (1.001) * day]) @image_comparison(['jpl_barh_units.png'], savefig_kwarg={'dpi': 120}, style='mpl20') def test_jpl_barh_units(): import matplotlib.testing.jpl_units as units units.register() day = units.Duration("ET", 24.0 * 60.0 * 60.0) x = [0 * units.km, 1 * units.km, 2 * units.km] w = [1 * day, 2 * day, 3 * 
day] b = units.Epoch("ET", dt=datetime(2009, 4, 26)) fig, ax = plt.subplots() ax.barh(x, w, left=b) ax.set_xlim([b - 1 * day, b + w[-1] + (1.001) * day]) def test_jpl_datetime_units_consistent(): import matplotlib.testing.jpl_units as units units.register() dt = datetime(2009, 4, 26) jpl = units.Epoch("ET", dt=dt) dt_conv = munits.registry.get_converter(dt).convert(dt, None, None) jpl_conv = munits.registry.get_converter(jpl).convert(jpl, None, None) assert dt_conv == jpl_conv def test_empty_arrays(): # Check that plotting an empty array with a dtype works plt.scatter(np.array([], dtype='datetime64[ns]'), np.array([])) def test_scatter_element0_masked(): times = np.arange('2005-02', '2005-03', dtype='datetime64[D]') y = np.arange(len(times), dtype=float) y[0] = np.nan fig, ax = plt.subplots() ax.scatter(times, y) fig.canvas.draw() def test_errorbar_mixed_units(): x = np.arange(10) y = [datetime(2020, 5, i * 2 + 1) for i in x] fig, ax = plt.subplots() ax.errorbar(x, y, timedelta(days=0.5)) fig.canvas.draw() @check_figures_equal() def test_subclass(fig_test, fig_ref): class subdate(datetime): pass fig_test.subplots().plot(subdate(2000, 1, 1), 0, "o") fig_ref.subplots().plot(datetime(2000, 1, 1), 0, "o") def test_shared_axis_quantity(quantity_converter): munits.registry[Quantity] = quantity_converter x = Quantity(np.linspace(0, 1, 10), "hours") y1 = Quantity(np.linspace(1, 2, 10), "feet") y2 = Quantity(np.linspace(3, 4, 10), "feet") fig, (ax1, ax2) = plt.subplots(2, 1, sharex='all', sharey='all') ax1.plot(x, y1) ax2.plot(x, y2) assert ax1.xaxis.get_units() == ax2.xaxis.get_units() == "hours" assert ax2.yaxis.get_units() == ax2.yaxis.get_units() == "feet" ax1.xaxis.set_units("seconds") ax2.yaxis.set_units("inches") assert ax1.xaxis.get_units() == ax2.xaxis.get_units() == "seconds" assert ax1.yaxis.get_units() == ax2.yaxis.get_units() == "inches" def test_shared_axis_datetime(): # datetime uses dates.DateConverter y1 = [datetime(2020, i, 1, tzinfo=timezone.utc) for i in 
range(1, 13)] y2 = [datetime(2021, i, 1, tzinfo=timezone.utc) for i in range(1, 13)] fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True) ax1.plot(y1) ax2.plot(y2) ax1.yaxis.set_units(timezone(timedelta(hours=5))) assert ax2.yaxis.units == timezone(timedelta(hours=5)) def test_shared_axis_categorical(): # str uses category.StrCategoryConverter d1 = {"a": 1, "b": 2} d2 = {"a": 3, "b": 4} fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True) ax1.plot(d1.keys(), d1.values()) ax2.plot(d2.keys(), d2.values()) ax1.xaxis.set_units(UnitData(["c", "d"])) assert "c" in ax2.xaxis.get_units()._mapping.keys() def test_explicit_converter(): d1 = {"a": 1, "b": 2} str_cat_converter = StrCategoryConverter() str_cat_converter_2 = StrCategoryConverter() date_converter = DateConverter() # Explicit is set fig1, ax1 = plt.subplots() ax1.xaxis.set_converter(str_cat_converter) assert ax1.xaxis.get_converter() == str_cat_converter # Explicit not overridden by implicit ax1.plot(d1.keys(), d1.values()) assert ax1.xaxis.get_converter() == str_cat_converter # No error when called twice with equivalent input ax1.xaxis.set_converter(str_cat_converter) # Error when explicit called twice with pytest.raises(RuntimeError): ax1.xaxis.set_converter(str_cat_converter_2) fig2, ax2 = plt.subplots() ax2.plot(d1.keys(), d1.values()) # No error when equivalent type is used ax2.xaxis.set_converter(str_cat_converter) fig3, ax3 = plt.subplots() ax3.plot(d1.keys(), d1.values()) # Warn when implicit overridden with pytest.warns(): ax3.xaxis.set_converter(date_converter) def test_empty_default_limits(quantity_converter): munits.registry[Quantity] = quantity_converter fig, ax1 = plt.subplots() ax1.xaxis.update_units(Quantity([10], "miles")) fig.draw_without_rendering() assert ax1.get_xlim() == (0, 100) ax1.yaxis.update_units(Quantity([10], "miles")) fig.draw_without_rendering() assert ax1.get_ylim() == (0, 100) fig, ax = plt.subplots() ax.axhline(30) ax.plot(Quantity(np.arange(0, 3), "miles"), 
Quantity(np.arange(0, 6, 2), "feet")) fig.draw_without_rendering() assert ax.get_xlim() == (0, 2) assert ax.get_ylim() == (0, 30) fig, ax = plt.subplots() ax.axvline(30) ax.plot(Quantity(np.arange(0, 3), "miles"), Quantity(np.arange(0, 6, 2), "feet")) fig.draw_without_rendering() assert ax.get_xlim() == (0, 30) assert ax.get_ylim() == (0, 4) fig, ax = plt.subplots() ax.xaxis.update_units(Quantity([10], "miles")) ax.axhline(30) fig.draw_without_rendering() assert ax.get_xlim() == (0, 100) assert ax.get_ylim() == (28.5, 31.5) fig, ax = plt.subplots() ax.yaxis.update_units(Quantity([10], "miles")) ax.axvline(30) fig.draw_without_rendering() assert ax.get_ylim() == (0, 100) assert ax.get_xlim() == (28.5, 31.5) # test array-like objects...
Quantity
python
walkccc__LeetCode
solutions/3411. Maximum Subarray With Equal Products/3411.py
{ "start": 0, "end": 352 }
class ____:
  def maxLength(self, nums: list[int]) -> int:
    """Return the length of the longest subarray whose product equals
    its LCM times its GCD (a "product equivalent" subarray).

    Brute force over all starting indices, extending the window while
    maintaining the running product, LCM and GCD incrementally.
    """
    best = 0
    for start in range(len(nums)):
      product = 1
      running_lcm = 1
      running_gcd = 0  # gcd(0, x) == x, so 0 is the identity seed
      for end, value in enumerate(nums[start:], start):
        product *= value
        running_lcm = math.lcm(running_lcm, value)
        running_gcd = math.gcd(running_gcd, value)
        if product == running_lcm * running_gcd:
          best = max(best, end - start + 1)
    return best
Solution
python
fsspec__filesystem_spec
fsspec/tests/test_async.py
{ "start": 4494, "end": 7125 }
class ____(fsspec.asyn.AbstractAsyncStreamedFile):
    """In-memory AbstractAsyncStreamedFile used to exercise streamed read/write."""

    def __init__(self, fs, path, mode, block_size, autocommit, **kwargs):
        super().__init__(fs, path, mode, block_size, autocommit, **kwargs)
        # Pre-seeded backing store for the read test: 20 * 7 = 140 bytes.
        self.temp_buffer = io.BytesIO(b"foo-bar" * 20)

    async def _fetch_range(self, start, end):
        # Reads sequentially from the buffer's current position rather than
        # seeking to `start` -- sufficient for the sequential-read test below.
        return self.temp_buffer.read(end - start)

    async def _initiate_upload(self):
        # Reinitialize for new uploads.
        self.temp_buffer = io.BytesIO()

    async def _upload_chunk(self, final=False):
        # Append the staged write buffer to the in-memory store.
        self.temp_buffer.write(self.buffer.getbuffer())

    async def get_data(self):
        # Test helper: snapshot everything written so far as bytes.
        return self.temp_buffer.getbuffer().tobytes()


@pytest.mark.asyncio
async def test_async_streamed_file_write():
    # Writing two blocks' worth of data should advance `loc` and land the
    # full payload in the dummy buffer after close() flushes it.
    test_fs = DummyAsyncFS()
    streamed_file = await test_fs.open_async("misc/foo.txt", mode="wb")
    inp_data = b"foo-bar" * streamed_file.blocksize * 2
    await streamed_file.write(inp_data)
    assert streamed_file.loc == len(inp_data)
    await streamed_file.close()

    out_data = await streamed_file.get_data()
    assert out_data.count(b"foo-bar") == streamed_file.blocksize * 2


@pytest.mark.asyncio
async def test_async_streamed_file_read():
    # Two sequential reads (21 + up-to-126 bytes) reassemble the 140-byte seed.
    test_fs = DummyAsyncFS()
    streamed_file = await test_fs.open_async("misc/foo.txt", mode="rb")
    assert (
        await streamed_file.read(7 * 3) + await streamed_file.read(7 * 18)
        == b"foo-bar" * 20
    )
    await streamed_file.close()


def test_rm_file_with_rm_implementation():
    # rm_file should fall back to a filesystem's async _rm when only that is defined.
    class AsyncFSWithRm(fsspec.asyn.AsyncFileSystem):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self.removed_paths = []

        async def _rm(self, path, recursive=False, batch_size=None, **kwargs):
            # Accept both a single path and a list of paths.
            if isinstance(path, str):
                path = [path]
            for p in path:
                self.removed_paths.append(p)
            return None

    fs = AsyncFSWithRm()
    fs.rm_file("test/file.txt")
    assert "test/file.txt" in fs.removed_paths


def test_rm_file_with_rm_file_implementation():
    # rm_file should prefer an async _rm_file implementation when present.
    class AsyncFSWithRmFile(fsspec.asyn.AsyncFileSystem):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self.removed_paths = []

        async def _rm_file(self, path, **kwargs):
            self.removed_paths.append(path)
            return None

    fs = AsyncFSWithRmFile()
    fs.rm_file("test/file.txt")
    assert "test/file.txt" in fs.removed_paths


def test_rm_file_without_implementation():
    # With neither _rm nor _rm_file defined, rm_file must raise.
    fs = fsspec.asyn.AsyncFileSystem()
    with pytest.raises(NotImplementedError):
        fs.rm_file("test/file.txt")
DummyAsyncStreamedFile
python
django__django
tests/unmanaged_models/models.py
{ "start": 1286, "end": 1607 }
class ____(models.Model):
    """Unmanaged model mapped onto the pre-existing "b01" table."""

    class Meta:
        db_table = "b01"
        managed = False  # Django will not create, migrate, or drop this table.

    fk_a = models.ForeignKey(A02, models.CASCADE)
    f_a = models.CharField(max_length=10, db_index=True)
    f_b = models.IntegerField()


# To re-use the many-to-many intermediate table, we need to manually set up
# things up.
B02
python
doocs__leetcode
lcci/01.02.Check Permutation/Solution.py
{ "start": 0, "end": 116 }
class ____:
    def CheckPermutation(self, s1: str, s2: str) -> bool:
        """Return True when s2 is a permutation of s1.

        Two strings are permutations of each other exactly when their
        sorted character sequences are identical.
        """
        return sorted(s1) == sorted(s2)
Solution
python
getsentry__sentry
src/sentry/tsdb/redissnuba.py
{ "start": 3483, "end": 4702 }
class ____(BaseTSDB, metaclass=RedisSnubaTSDBMeta):
    def __init__(self, switchover_timestamp=None, **options):
        """
        A TSDB backend that uses the Snuba outcomes and events datasets as far
        as possible instead of reading/writing to redis. Reading will trigger a
        Snuba query, while writing is a noop as Snuba reads from outcomes.

        Note: Using this backend requires you to start Snuba outcomes consumers
        (not to be confused with the outcomes consumers in Sentry itself).

        :param switchover_timestamp: When set, only start reading from snuba
            after this timestamp (as returned by `time.time()`). When this
            timestamp has not been reached yet, this backend just degrades to
            Redis for *all* keys.

            The default `None` will start reading from Snuba immediately and
            is equivalent to setting a past timestamp.
        """
        self.switchover_timestamp = switchover_timestamp
        # Delegate backends keyed by name; NOTE(review): presumably the
        # RedisSnubaTSDBMeta metaclass dispatches reads/writes to one of these
        # based on the switchover timestamp -- confirm against the metaclass.
        self.backends = {
            "dummy": DummyTSDB(),
            "redis": RedisTSDB(**options.pop("redis", {})),
            "snuba": SnubaTSDB(**options.pop("snuba", {})),
        }
        # Per-backend options are popped above; the remainder goes to BaseTSDB.
        super().__init__(**options)
RedisSnubaTSDB
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/widgets/menus.py
{ "start": 12785, "end": 13419 }
class ____:
    """One entry of a menu: a label, an optional action, and optional children
    forming a submenu."""

    def __init__(
        self,
        text: str = "",
        handler: Callable[[], None] | None = None,
        children: list[MenuItem] | None = None,
        shortcut: Sequence[Keys | str] | None = None,
        disabled: bool = False,
    ) -> None:
        self.text = text
        self.handler = handler
        # Fall back to a fresh empty list when no (truthy) children are given.
        self.children = children or []
        self.shortcut = shortcut
        self.disabled = disabled
        # Index of the currently highlighted child item.
        self.selected_item = 0

    @property
    def width(self) -> int:
        """Display width required for the widest child label (0 when leaf)."""
        if not self.children:
            return 0
        return max(get_cwidth(child.text) for child in self.children)
MenuItem
python
getsentry__sentry
src/sentry/integrations/gitlab/integration.py
{ "start": 3814, "end": 7855 }
class ____(RepositoryIntegration, GitlabIssuesSpec, CommitContextIntegration): codeowners_locations = ["CODEOWNERS", ".gitlab/CODEOWNERS", "docs/CODEOWNERS"] @property def integration_name(self) -> str: return IntegrationProviderSlug.GITLAB def get_client(self) -> GitLabApiClient: try: # eagerly populate this just for the error message self.default_identity except Identity.DoesNotExist as e: raise IntegrationConfigurationError("Identity not found.") from e else: return GitLabApiClient(self) # IntegrationInstallation methods def error_message_from_json(self, data): """ Extract error messages from gitlab API errors. Generic errors come in the `error` key while validation errors are generally in `message`. See https://docs.gitlab.com/ee/api/#data-validation-and-error-reporting """ if "message" in data: return data["message"] if "error" in data: return data["error"] # RepositoryIntegration methods def has_repo_access(self, repo: RpcRepository) -> bool: # TODO: define this, used to migrate repositories return False def get_repositories( self, query: str | None = None, page_number_limit: int | None = None ) -> list[dict[str, Any]]: # Note: gitlab projects are the same things as repos everywhere else group = self.get_group_id() resp = self.get_client().search_projects(group, query) return [{"identifier": repo["id"], "name": repo["name_with_namespace"]} for repo in resp] def source_url_matches(self, url: str) -> bool: return url.startswith("https://{}".format(self.model.metadata["domain_name"])) def format_source_url(self, repo: Repository, filepath: str, branch: str | None) -> str: base_url = self.model.metadata["base_url"] repo_name = repo.config["path"] # Must format the url ourselves since `check_file` is a head request # "https://gitlab.com/gitlab-org/gitlab/blob/master/README.md" return f"{base_url}/{repo_name}/blob/{branch}/{filepath}" def extract_branch_from_source_url(self, repo: Repository, url: str) -> str: url = url.replace(f"{repo.url}/-/blob/", "") url = 
url.replace(f"{repo.url}/blob/", "") branch, _, _ = url.partition("/") return branch def extract_source_path_from_source_url(self, repo: Repository, url: str) -> str: url = url.replace(f"{repo.url}/-/blob/", "") url = url.replace(f"{repo.url}/blob/", "") _, _, source_path = url.partition("/") return source_path # CommitContextIntegration methods def on_create_or_update_comment_error(self, api_error: ApiError, metrics_base: str) -> bool: if api_error.code == 429: metrics.incr( metrics_base.format(integration=self.integration_name, key="error"), tags={"type": "rate_limited_error"}, ) return True return False # Gitlab only functions def get_group_id(self): return self.model.metadata["group_id"] def search_projects(self, query): client = self.get_client() group_id = self.get_group_id() return client.search_projects(group_id, query) # TODO(cathy): define in issue ABC def search_issues(self, query: str | None, **kwargs) -> list[dict[str, Any]]: client = self.get_client() project_id = kwargs["project_id"] iids = kwargs["iids"] resp = client.search_project_issues(project_id, query, iids) assert isinstance(resp, list) return resp def get_pr_comment_workflow(self) -> PRCommentWorkflow: return GitlabPRCommentWorkflow(integration=self) MERGED_PR_COMMENT_BODY_TEMPLATE = """\ ## Issues attributed to commits in this merge request The following issues were detected after merging: {issue_list}""".rstrip()
GitlabIntegration
python
joke2k__faker
tests/providers/test_phone_number.py
{ "start": 2677, "end": 3866 }
class ____:
    """Test az_AZ phone number provider methods"""

    @classmethod
    def setup_class(cls):
        # Mobile formats: +994 plus 9 digits, or 0XX-XXX-XX-XX / 0XX XXX XX XX.
        cls.cellphone_patterns = re.compile(
            r"\+994\d{9}|0\d{2}-\d{3}-\d{2}-\d{2}|0\d{2} \d{3} \d{2} \d{2}",
        )
        # Landline format: 0XX XXX XX XX (space-separated only).
        cls.landline_patterns = re.compile(
            r"0\d{2} \d{3} \d{2} \d{2}",
        )

    def test_phone_number(self, faker, num_samples):
        # phone_number() may yield either a cellphone or a landline format.
        for _ in range(num_samples):
            phone_number = faker.phone_number()
            assert isinstance(phone_number, str)
            assert self.cellphone_patterns.fullmatch(phone_number) or self.landline_patterns.fullmatch(phone_number)

    def test_cellphone_number(self, faker, num_samples):
        # cellphone_number() must always match a mobile pattern.
        for _ in range(num_samples):
            cellphone_number = faker.cellphone_number()
            assert isinstance(cellphone_number, str)
            assert self.cellphone_patterns.fullmatch(cellphone_number)

    def test_landline_number(self, faker, num_samples):
        # landline_number() must always match the landline pattern.
        for _ in range(num_samples):
            landline_number = faker.landline_number()
            assert isinstance(landline_number, str)
            assert self.landline_patterns.fullmatch(landline_number)
TestAzAz
python
pytorch__pytorch
test/dynamo/test_modules.py
{ "start": 19609, "end": 20010 }
class ____(torch.nn.Module):
    """Module whose forward scales the input by the number of parameters.

    `scale_dup` aliases `scale`, so `parameters()` (which removes duplicates
    by default) yields three parameters: linear1.weight, linear1.bias, scale.
    """

    def __init__(self) -> None:
        super().__init__()
        self.linear1 = torch.nn.Linear(10, 10)
        self.scale = torch.nn.Parameter(torch.randn(10, 10))
        self.scale_dup = self.scale

    def forward(self, x):
        # Count the (de-duplicated) parameters on every call.
        num_params = sum(1 for _ in self.parameters())
        return x * self.scale * num_params
ParametersModule5
python
wandb__wandb
wandb/automations/_generated/create_generic_webhook_integration.py
{ "start": 314, "end": 542 }
class ____(GQLResult):
    # Generated result wrapper for the createGenericWebhookIntegration GraphQL
    # mutation; the alias maps the camelCase API field onto snake_case.
    create_generic_webhook_integration: Optional[
        CreateGenericWebhookIntegrationCreateGenericWebhookIntegration
    ] = Field(alias="createGenericWebhookIntegration")
CreateGenericWebhookIntegration
python
pytest-dev__pytest
src/_pytest/recwarn.py
{ "start": 5481, "end": 8650 }
class ____(warnings.catch_warnings): """A context manager to record raised warnings. Each recorded warning is an instance of :class:`warnings.WarningMessage`. Adapted from `warnings.catch_warnings`. .. note:: ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated differently; see :ref:`ensuring_function_triggers`. """ def __init__(self, *, _ispytest: bool = False) -> None: check_ispytest(_ispytest) super().__init__(record=True) self._entered = False self._list: list[warnings.WarningMessage] = [] @property def list(self) -> list[warnings.WarningMessage]: """The list of recorded warnings.""" return self._list def __getitem__(self, i: int) -> warnings.WarningMessage: """Get a recorded warning by index.""" return self._list[i] def __iter__(self) -> Iterator[warnings.WarningMessage]: """Iterate through the recorded warnings.""" return iter(self._list) def __len__(self) -> int: """The number of recorded warnings.""" return len(self._list) def pop(self, cls: type[Warning] = Warning) -> warnings.WarningMessage: """Pop the first recorded warning which is an instance of ``cls``, but not an instance of a child class of any other match. Raises ``AssertionError`` if there is no match. """ best_idx: int | None = None for i, w in enumerate(self._list): if w.category == cls: return self._list.pop(i) # exact match, stop looking if issubclass(w.category, cls) and ( best_idx is None or not issubclass(w.category, self._list[best_idx].category) ): best_idx = i if best_idx is not None: return self._list.pop(best_idx) __tracebackhide__ = True raise AssertionError(f"{cls!r} not found in warning list") def clear(self) -> None: """Clear the list of recorded warnings.""" self._list[:] = [] # Type ignored because we basically want the `catch_warnings` generic type # parameter to be ourselves but that is not possible(?). 
def __enter__(self) -> Self: # type: ignore[override] if self._entered: __tracebackhide__ = True raise RuntimeError(f"Cannot enter {self!r} twice") _list = super().__enter__() # record=True means it's None. assert _list is not None self._list = _list warnings.simplefilter("always") return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> None: if not self._entered: __tracebackhide__ = True raise RuntimeError(f"Cannot exit {self!r} without entering first") super().__exit__(exc_type, exc_val, exc_tb) # Built-in catch_warnings does not reset entered state so we do it # manually here for this context manager to become reusable. self._entered = False @final
WarningsRecorder
python
pytorch__pytorch
torch/_functorch/_aot_autograd/descriptors.py
{ "start": 15264, "end": 15686 }
class ____:
    """Describes where an output from an AOTAutograd produced FX graph will
    eventually be bundled into the final output"""

    def expr(self) -> str:
        # Abstract: subclasses return a human-readable locator expression.
        raise NotImplementedError("Subclasses must implement expr()")

    def is_grad(self) -> bool:
        """True if this output is a grad or derived from a grad (e.g., subclass attr)"""
        # Base class defaults to "not a gradient"; grad-carrying subclasses override.
        return False


@dataclasses.dataclass(frozen=True)
AOTOutput
python
marshmallow-code__marshmallow
tests/base.py
{ "start": 6327, "end": 6411 }
class ____(UserSchema):
    # Same as UserSchema, but the homepage field also accepts relative URLs.
    homepage = fields.Url(relative=True)
UserRelativeUrlSchema
python
walkccc__LeetCode
solutions/847. Shortest Path Visiting All Nodes/847.py
{ "start": 0, "end": 546 }
class ____:
  def shortestPathLength(self, graph: list[list[int]]) -> int:
    """Fewest edges needed to visit every node, starting anywhere.

    BFS over (node, visited-bitmask) states, seeded simultaneously from
    every node; the first state reaching the full mask gives the answer.
    """
    n = len(graph)
    full_mask = (1 << n) - 1

    frontier = collections.deque((node, 1 << node) for node in range(n))
    visited = {(node, 1 << node) for node in range(n)}

    steps = 0
    while frontier:
      for _ in range(len(frontier)):
        node, mask = frontier.popleft()
        if mask == full_mask:
          return steps
        for neighbor in graph[node]:
          successor = (neighbor, mask | 1 << neighbor)
          # Mark on push so each state is enqueued at most once.
          if successor not in visited:
            visited.add(successor)
            frontier.append(successor)
      steps += 1

    return -1
Solution
python
facebook__pyre-check
client/tests/dataclasses_merge_test.py
{ "start": 727, "end": 889 }
class ____: x: List[int] = field( default_factory=list, metadata={"merge_policy": Policy.PREPEND} ) @dataclass_merge @dataclass(frozen=True)
Prepend
python
openai__openai-python
src/openai/types/responses/response_computer_tool_call_param.py
{ "start": 1105, "end": 1484 }
class ____(TypedDict, total=False):
    # Wire payload for a computer-tool double-click action; total=False with
    # Required[...] on every key means all three keys must still be present.
    type: Required[Literal["double_click"]]
    """Specifies the event type.

    For a double click action, this property is always set to `double_click`.
    """

    x: Required[int]
    """The x-coordinate where the double click occurred."""

    y: Required[int]
    """The y-coordinate where the double click occurred."""
ActionDoubleClick
python
encode__django-rest-framework
rest_framework/authentication.py
{ "start": 702, "end": 866 }
class ____(CsrfViewMiddleware):
    """CsrfViewMiddleware variant whose rejection hook reports the reason
    instead of producing an HTTP response."""

    def _reject(self, request, reason):
        # Return the failure reason instead of an HttpResponse
        return reason
CSRFCheck
python
dagster-io__dagster
python_modules/libraries/dagster-powerbi/dagster_powerbi/resource.py
{ "start": 3475, "end": 16716 }
class ____(ConfigurableResource): """Represents a workspace in PowerBI and provides utilities to interact with the PowerBI API. """ credentials: ResourceDependency[PowerBICredentials] workspace_id: str = Field(..., description="The ID of the PowerBI group to use.") refresh_poll_interval: int = Field( default=5, description="The interval in seconds to poll for refresh status." ) refresh_timeout: int = Field( default=300, description="The maximum time in seconds to wait for a refresh to complete.", ) @cached_property def _api_token(self) -> str: return self.credentials.api_token def _fetch( self, endpoint: str, method: str = "GET", json: Any = None, params: Optional[dict[str, Any]] = None, group_scoped: bool = True, ) -> requests.Response: """Fetch JSON data from the PowerBI API. Raises an exception if the request fails. Args: endpoint (str): The API endpoint to fetch data from. Returns: Dict[str, Any]: The JSON data returned from the API. """ headers = { "Content-Type": "application/json", "Authorization": f"Bearer {self._api_token}", } base_url = f"{BASE_API_URL}/groups/{self.workspace_id}" if group_scoped else BASE_API_URL url = f"{base_url}/{endpoint}" if params: url_parameters = urlencode(params) if params else None url = f"{url}?{url_parameters}" response = requests.request( method=method, url=url, headers=headers, json=json, allow_redirects=True, ) response.raise_for_status() return response def _fetch_json( self, endpoint: str, method: str = "GET", json: Any = None, params: Optional[dict[str, Any]] = None, group_scoped: bool = True, ) -> dict[str, Any]: return self._fetch(endpoint, method, json, group_scoped=group_scoped, params=params).json() @public def trigger_and_poll_refresh(self, dataset_id: str) -> None: """Triggers a refresh of a PowerBI dataset and polls until it completes or fails.""" self.trigger_refresh(dataset_id) self.poll_refresh(dataset_id) @public def trigger_refresh(self, dataset_id: str) -> None: """Triggers a refresh of a PowerBI 
dataset.""" response = self._fetch( method="POST", endpoint=f"datasets/{dataset_id}/refreshes", json={"notifyOption": "NoNotification"}, group_scoped=True, ) if response.status_code != 202: raise Failure(f"Refresh failed to start: {response.content}") @public def poll_refresh(self, dataset_id: str) -> None: """Polls the refresh status of a PowerBI dataset until it completes or fails.""" status = None start = time.monotonic() while status not in ["Completed", "Failed"]: if time.monotonic() - start > self.refresh_timeout: raise Failure(f"Refresh timed out after {self.refresh_timeout} seconds.") last_refresh = self._fetch_json( f"datasets/{dataset_id}/refreshes", group_scoped=True, )["value"][0] status = last_refresh["status"] time.sleep(self.refresh_poll_interval) if status == "Failed": error = last_refresh.get("serviceExceptionJson") # pyright: ignore[reportPossiblyUnboundVariable] raise Failure(f"Refresh failed: {error}") @cached_method def _get_reports(self) -> Mapping[str, Any]: """Fetches a list of all PowerBI reports in the workspace.""" return self._fetch_json("reports") @cached_method def _get_semantic_models(self) -> Mapping[str, Any]: """Fetches a list of all PowerBI semantic models in the workspace.""" return self._fetch_json("datasets") @cached_method def _get_semantic_model_sources(self, dataset_id: str) -> Mapping[str, Any]: """Fetches a list of all data sources for a given semantic model.""" return self._fetch_json(f"datasets/{dataset_id}/datasources") @cached_method def _get_dashboards(self) -> Mapping[str, Any]: """Fetches a list of all PowerBI dashboards in the workspace.""" return self._fetch_json("dashboards") @cached_method def _get_dashboard_tiles(self, dashboard_id: str) -> Mapping[str, Any]: """Fetches a list of all tiles for a given PowerBI dashboard, including which reports back each tile. 
""" return self._fetch_json(f"dashboards/{dashboard_id}/tiles") @cached_method def _scan(self) -> Mapping[str, Any]: submission = self._fetch_json( method="POST", endpoint="admin/workspaces/getInfo", group_scoped=False, json={"workspaces": [self.workspace_id]}, params={ "lineage": "true", "datasourceDetails": "true", "datasetSchema": "true", "datasetExpressions": "true", }, ) scan_id = submission["id"] now = get_current_timestamp() start_time = now status = None while status != "Succeeded" and now - start_time < ADMIN_SCAN_TIMEOUT: scan_details = self._fetch_json( endpoint=f"admin/workspaces/scanStatus/{scan_id}", group_scoped=False ) status = scan_details["status"] time.sleep(0.1) now = get_current_timestamp() if status != "Succeeded": raise Failure(f"Scan not successful after {ADMIN_SCAN_TIMEOUT} seconds: {scan_details}") # pyright: ignore[reportPossiblyUnboundVariable] return self._fetch_json( endpoint=f"admin/workspaces/scanResult/{scan_id}", group_scoped=False ) @cached_method def fetch_powerbi_workspace_data(self, use_workspace_scan: bool) -> PowerBIWorkspaceData: """Retrieves all Power BI content from the workspace and returns it as a PowerBIWorkspaceData object. Future work will cache this data to avoid repeated calls to the Power BI API. Args: use_workspace_scan (bool): Whether to scan the entire workspace using admin APIs at once to get all content. Returns: PowerBIWorkspaceData: A snapshot of the Power BI workspace's content. 
""" if use_workspace_scan: return self._fetch_powerbi_workspace_data_scan() return self._fetch_powerbi_workspace_data_legacy() def _fetch_powerbi_workspace_data_scan(self) -> PowerBIWorkspaceData: scan_result = self._scan() augmented_dashboard_data = scan_result["workspaces"][0]["dashboards"] dashboards = [ PowerBIContentData(content_type=PowerBIContentType.DASHBOARD, properties=data) for data in augmented_dashboard_data ] reports = [ PowerBIContentData(content_type=PowerBIContentType.REPORT, properties=data) for data in scan_result["workspaces"][0]["reports"] ] semantic_models_data = scan_result["workspaces"][0]["datasets"] semantic_models = [ PowerBIContentData(content_type=PowerBIContentType.SEMANTIC_MODEL, properties=dataset) for dataset in semantic_models_data ] return PowerBIWorkspaceData.from_content_data( self.workspace_id, dashboards + reports + semantic_models ) def _fetch_powerbi_workspace_data_legacy(self) -> PowerBIWorkspaceData: dashboard_data = self._get_dashboards()["value"] augmented_dashboard_data = [ {**dashboard, "tiles": self._get_dashboard_tiles(dashboard["id"])["value"]} for dashboard in dashboard_data ] dashboards = [ PowerBIContentData(content_type=PowerBIContentType.DASHBOARD, properties=data) for data in augmented_dashboard_data ] reports = [ PowerBIContentData(content_type=PowerBIContentType.REPORT, properties=data) for data in self._get_reports()["value"] ] semantic_models_data = self._get_semantic_models()["value"] data_sources = [] for dataset in semantic_models_data: dataset_sources = self._get_semantic_model_sources(dataset["id"])["value"] dataset_sources_with_id = [ source if "datasourceId" in source else {"datasourceId": generate_data_source_id(source), **source} for source in dataset_sources ] dataset["sources"] = [source["datasourceId"] for source in dataset_sources_with_id] for data_source in dataset_sources_with_id: data_sources.append( PowerBIContentData( content_type=PowerBIContentType.DATA_SOURCE, properties=data_source ) ) 
semantic_models = [ PowerBIContentData(content_type=PowerBIContentType.SEMANTIC_MODEL, properties=dataset) for dataset in semantic_models_data ] return PowerBIWorkspaceData.from_content_data( self.workspace_id, dashboards + reports + semantic_models + data_sources, ) @public @deprecated( breaking_version="1.9.0", additional_warn_text="Use dagster_powerbi.load_powerbi_asset_specs instead", ) def build_defs( self, dagster_powerbi_translator: type[DagsterPowerBITranslator] = DagsterPowerBITranslator, enable_refresh_semantic_models: bool = False, ) -> Definitions: """Returns a Definitions object which will load Power BI content from the workspace and translate it into assets, using the provided translator. Args: context (Optional[DefinitionsLoadContext]): The context to use when loading the definitions. If not provided, retrieved contextually. dagster_powerbi_translator (Type[DagsterPowerBITranslator]): The translator to use to convert Power BI content into AssetSpecs. Defaults to DagsterPowerBITranslator. enable_refresh_semantic_models (bool): Whether to enable refreshing semantic models by materializing them in Dagster. Returns: Definitions: A Definitions object which will build and return the Power BI content. 
""" from dagster_powerbi.assets import build_semantic_model_refresh_asset_definition resource_key = f"power_bi_{self.workspace_id.replace('-', '_')}" return Definitions( assets=[ build_semantic_model_refresh_asset_definition(resource_key, spec) if PowerBITagSet.extract(spec.tags).asset_type == "semantic_model" else spec for spec in load_powerbi_asset_specs( self, dagster_powerbi_translator(), use_workspace_scan=False ) ], resources={resource_key: self}, ) @beta def load_powerbi_asset_specs( workspace: PowerBIWorkspace, dagster_powerbi_translator: Optional[ Union[DagsterPowerBITranslator, type[DagsterPowerBITranslator]] ] = None, use_workspace_scan: bool = True, ) -> Sequence[AssetSpec]: """Returns a list of AssetSpecs representing the Power BI content in the workspace. Args: workspace (PowerBIWorkspace): The Power BI workspace to load assets from. dagster_powerbi_translator (Optional[Union[DagsterPowerBITranslator, Type[DagsterPowerBITranslator]]]): The translator to use to convert Power BI content into :py:class:`dagster.AssetSpec`. Defaults to :py:class:`DagsterPowerBITranslator`. use_workspace_scan (bool): Whether to scan the entire workspace using admin APIs at once to get all content. Defaults to True. Returns: List[AssetSpec]: The set of assets representing the Power BI content in the workspace. """ if isinstance(dagster_powerbi_translator, type): deprecation_warning( subject="Support of `dagster_powerbi_translator` as a Type[DagsterPowerBITranslator]", breaking_version="1.10", additional_warn_text=( "Pass an instance of DagsterPowerBITranslator or subclass to `dagster_powerbi_translator` instead." 
), ) dagster_powerbi_translator = dagster_powerbi_translator() with workspace.process_config_and_initialize_cm() as initialized_workspace: return check.is_list( PowerBIWorkspaceDefsLoader( workspace=initialized_workspace, translator=dagster_powerbi_translator or DagsterPowerBITranslator(), use_workspace_scan=use_workspace_scan, ) .build_defs() .assets, AssetSpec, ) @dataclass
PowerBIWorkspace
python
mlflow__mlflow
mlflow/tracing/export/async_export_queue.py
{ "start": 465, "end": 959 }
class ____:
    """A dataclass to represent a simple task."""

    # Callable invoked when the task is handled.
    handler: Callable[..., Any]
    # Positional arguments passed to the handler.
    args: Sequence[Any]
    # Prefix for the warning logged when the handler raises.
    error_msg: str = ""

    def handle(self) -> None:
        """Handle the task execution. This method must not raise any exception."""
        try:
            self.handler(*self.args)
        except Exception as e:
            # Swallow every error: a failing task must never propagate to the
            # caller. The full traceback is attached only at DEBUG level.
            _logger.warning(
                f"{self.error_msg} Error: {e}.",
                exc_info=_logger.isEnabledFor(logging.DEBUG),
            )
Task
python
joblib__joblib
joblib/externals/loky/backend/popen_loky_posix.py
{ "start": 496, "end": 685 }
class ____:
    # Holds a file descriptor made inheritable for a child process;
    # detach() hands the raw fd back. NOTE(review): presumably consumed by
    # the reduction/pickling machinery when spawning -- confirm at call sites.
    def __init__(self, fd):
        self.fd = reduction._mk_inheritable(fd)

    def detach(self):
        return self.fd


#
# Start child process using subprocess.Popen
#
_DupFd
python
ansible__ansible
lib/ansible/module_utils/urls.py
{ "start": 13546, "end": 25337 }
class ____(urllib.request.HTTPRedirectHandler): """This is an implementation of a RedirectHandler to match the functionality provided by httplib2. It will utilize the value of ``follow_redirects`` to determine how redirects should be handled in urllib. """ def __init__(self, follow_redirects=None): self.follow_redirects = follow_redirects def __call__(self, *args, **kwargs): super().__init__(*args, **kwargs) return self try: urllib.request.HTTPRedirectHandler.http_error_308 # type: ignore[attr-defined] except AttributeError: # deprecated: description='urllib http 308 support' python_version='3.11' http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302 def redirect_request(self, req, fp, code, msg, headers, newurl): follow_redirects = self.follow_redirects # Preserve urllib2 compatibility if follow_redirects in ('urllib2', 'urllib'): return urllib.request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, headers, newurl) # Handle disabled redirects elif follow_redirects in ('no', 'none', False): raise urllib.error.HTTPError(newurl, code, msg, headers, fp) method = req.get_method() # Handle non-redirect HTTP status or invalid follow_redirects if follow_redirects in ('all', 'yes', True): if code < 300 or code >= 400: raise urllib.error.HTTPError(req.get_full_url(), code, msg, headers, fp) elif follow_redirects == 'safe': if code < 300 or code >= 400 or method not in ('GET', 'HEAD'): raise urllib.error.HTTPError(req.get_full_url(), code, msg, headers, fp) else: raise urllib.error.HTTPError(req.get_full_url(), code, msg, headers, fp) data = req.data origin_req_host = req.origin_req_host # Be conciliant with URIs containing a space newurl = newurl.replace(' ', '%20') # Support redirect with payload and original headers if code in (307, 308): # Preserve payload and headers req_headers = req.headers else: # Do not preserve payload and filter headers data = None req_headers = {k: v for k, v in req.headers.items() if k.lower() not in 
("content-length", "content-type", "transfer-encoding")} # http://tools.ietf.org/html/rfc7231#section-6.4.4 if code == 303 and method != 'HEAD': method = 'GET' # Do what the browsers do, despite standards... # First, turn 302s into GETs. if code == 302 and method != 'HEAD': method = 'GET' # Second, if a POST is responded to with a 301, turn it into a GET. if code == 301 and method == 'POST': method = 'GET' return urllib.request.Request( newurl, data=data, headers=req_headers, origin_req_host=origin_req_host, unverifiable=True, method=method.upper(), ) def make_context(cafile=None, cadata=None, capath=None, ciphers=None, validate_certs=True, client_cert=None, client_key=None): if ciphers is None: ciphers = [] if not is_sequence(ciphers): raise TypeError('Ciphers must be a list. Got %s.' % ciphers.__class__.__name__) context = ssl.create_default_context(cafile=cafile) if not validate_certs: context.options |= ssl.OP_NO_SSLv3 context.check_hostname = False context.verify_mode = ssl.CERT_NONE # If cafile is passed, we are only using that for verification, # don't add additional ca certs if validate_certs and not cafile: if not cadata: cadata = bytearray() cadata.extend(get_ca_certs(capath=capath)[0]) if cadata: context.load_verify_locations(cadata=cadata) if ciphers: context.set_ciphers(':'.join(map(to_native, ciphers))) if client_cert: # TLS 1.3 needs this to be set to True to allow post handshake cert # authentication. This functionality was added in Python 3.8 and was # backported to 3.6.7, and 3.7.1 so needs a check for now. 
if hasattr(context, "post_handshake_auth"): context.post_handshake_auth = True context.load_cert_chain(client_cert, keyfile=client_key) return context def get_ca_certs(cafile=None, capath=None): # tries to find a valid CA cert in one of the # standard locations for the current distribution # Using a dict, instead of a set for order, the value is meaningless and will be None # Not directly using a bytearray to avoid duplicates with fast lookup cadata = {} # If cafile is passed, we are only using that for verification, # don't add additional ca certs if cafile: paths_checked = [cafile] with open(to_bytes(cafile, errors='surrogate_or_strict'), 'r', errors='surrogateescape') as f: for pem in extract_pem_certs(f.read()): b_der = ssl.PEM_cert_to_DER_cert(pem) cadata[b_der] = None return bytearray().join(cadata), paths_checked default_verify_paths = ssl.get_default_verify_paths() default_capath = default_verify_paths.capath paths_checked = {default_capath or default_verify_paths.cafile} if capath: paths_checked.add(capath) system = to_text(platform.system(), errors='surrogate_or_strict') # build a list of paths to check for .crt/.pem files # based on the platform type if system == u'Linux': paths_checked.add('/etc/pki/ca-trust/extracted/pem') paths_checked.add('/etc/pki/tls/certs') paths_checked.add('/usr/share/ca-certificates/cacert.org') elif system == u'FreeBSD': paths_checked.add('/usr/local/share/certs') elif system == u'OpenBSD': paths_checked.add('/etc/ssl') elif system == u'NetBSD': paths_checked.add('/etc/openssl/certs') elif system == u'SunOS': paths_checked.add('/opt/local/etc/openssl/certs') elif system == u'AIX': paths_checked.add('/var/ssl/certs') paths_checked.add('/opt/freeware/etc/ssl/certs') elif system == u'Darwin': paths_checked.add('/usr/local/etc/openssl') # fall back to a user-deployed cert in a standard # location if the OS platform one is not available paths_checked.add('/etc/ansible') # for all of the paths, find any .crt or .pem files # and 
compile them into single temp file for use # in the ssl check to speed up the test for path in paths_checked: if not path or path == default_capath or not os.path.isdir(path): continue for f in os.listdir(path): full_path = os.path.join(path, f) if os.path.isfile(full_path) and os.path.splitext(f)[1] in {'.pem', '.cer', '.crt'}: try: with open(full_path, 'r', errors='surrogateescape') as cert_file: cert = cert_file.read() try: for pem in extract_pem_certs(cert): b_der = ssl.PEM_cert_to_DER_cert(pem) cadata[b_der] = None except Exception: continue except OSError: pass # paths_checked isn't used any more, but is kept just for ease of debugging return bytearray().join(cadata), list(paths_checked) def getpeercert(response, binary_form=False): """ Attempt to get the peer certificate of the response from urlopen. """ socket = response.fp.raw._sock try: return socket.getpeercert(binary_form) except AttributeError: pass # Not HTTPS def get_channel_binding_cert_hash(certificate_der): """ Gets the channel binding app data for a TLS connection using the peer cert. """ if not HAS_CRYPTOGRAPHY: return # Logic documented in RFC 5929 section 4 https://tools.ietf.org/html/rfc5929#section-4 cert = x509.load_der_x509_certificate(certificate_der, default_backend()) hash_algorithm = None try: hash_algorithm = cert.signature_hash_algorithm except UnsupportedAlgorithm: pass # If the signature hash algorithm is unknown/unsupported or md5/sha1 we must use SHA256. 
if not hash_algorithm or hash_algorithm.name in ('md5', 'sha1'): hash_algorithm = hashes.SHA256() digest = hashes.Hash(hash_algorithm, default_backend()) digest.update(certificate_der) return digest.finalize() def rfc2822_date_string(timetuple, zone='-0000'): """Accepts a timetuple and optional zone which defaults to ``-0000`` and returns a date string as specified by RFC 2822, e.g.: Fri, 09 Nov 2001 01:08:47 -0000 Copied from email.utils.formatdate and modified for separate use """ return '%s, %02d %s %04d %02d:%02d:%02d %s' % ( ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]], timetuple[2], ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1], timetuple[0], timetuple[3], timetuple[4], timetuple[5], zone) def _configure_auth(url, url_username, url_password, use_gssapi, force_basic_auth, use_netrc): headers = {} handlers = [] parsed = urlparse(url) if parsed.scheme == 'ftp': return url, headers, handlers username = url_username password = url_password if username: netloc = parsed.netloc elif '@' in parsed.netloc: credentials, netloc = parsed.netloc.split('@', 1) if ':' in credentials: username, password = credentials.split(':', 1) else: username = credentials password = '' username = unquote(username) password = unquote(password) # reconstruct url without credentials url = urlunparse(parsed._replace(netloc=netloc)) if use_gssapi: if HTTPGSSAPIAuthHandler: # type: ignore[truthy-function] handlers.append(HTTPGSSAPIAuthHandler(username, password)) else: imp_err_msg = missing_required_lib('gssapi', reason='for use_gssapi=True', url='https://pypi.org/project/gssapi/') raise MissingModuleError(imp_err_msg, import_traceback=GSSAPI_IMP_ERR) elif username and not force_basic_auth: passman = urllib.request.HTTPPasswordMgrWithDefaultRealm() # this creates a password manager passman.add_password(None, netloc, username, password) # because we have put None at the start it will always # use this username/password 
combination for urls # for which `theurl` is a super-url authhandler = urllib.request.HTTPBasicAuthHandler(passman) digest_authhandler = urllib.request.HTTPDigestAuthHandler(passman) # create the AuthHandler handlers.append(authhandler) handlers.append(digest_authhandler) elif username and force_basic_auth: headers["Authorization"] = basic_auth_header(username, password) elif use_netrc: try: rc = netrc.netrc(os.environ.get('NETRC')) login = rc.authenticators(parsed.hostname) except OSError: login = None if login: username, dummy, password = login if username and password: headers["Authorization"] = basic_auth_header(username, password) return url, headers, handlers
HTTPRedirectHandler
python
huggingface__transformers
src/transformers/models/glm4v/modular_glm4v.py
{ "start": 33056, "end": 33307 }
class ____(Qwen2_5_VLPreTrainedModel): _no_split_modules = ["Glm4vTextDecoderLayer", "Glm4vVisionBlock"] _can_record_outputs = { "hidden_states": Glm4vTextDecoderLayer, "attentions": Glm4vTextAttention, }
Glm4vPreTrainedModel
python
dask__dask
dask/dataframe/dask_expr/_reductions.py
{ "start": 30654, "end": 30714 }
class ____(IdxMin): _reduction_attribute = "idxmax"
IdxMax
python
realpython__materials
solid-principles-python/shapes_lsp.py
{ "start": 579, "end": 662 }
class ____(ABC): @abstractmethod def calculate_area(self): pass
Shape
python
ray-project__ray
python/ray/llm/tests/serve/cpu/deployments/test_prefix_tree.py
{ "start": 11772, "end": 16220 }
class ____: def test_prefix_match_empty_tree(self, tree: PrefixTree) -> None: """Test prefix_match on an empty tree returns empty string and None tenants.""" matched_text, matched_tenants = tree.prefix_match("hello") assert matched_text == "" assert matched_tenants is None def test_prefix_match_no_match(self, tree: PrefixTree) -> None: """Test prefix_match for a non-matching prefix returns empty string and all tenants.""" tree.add_tenants(["tenant_1", "tenant_2"], 0) tree.insert("hello", "tenant_1", 1) tree.insert("world", "tenant_2", 2) matched_text, matched_tenants = tree.prefix_match("foobar") assert matched_text == "" assert matched_tenants is not None assert sorted(matched_tenants) == sorted(["tenant_1", "tenant_2"]) def test_prefix_match_query_longer_than_stored_strings( self, tree: PrefixTree ) -> None: """Test prefix_match where query is longer than any stored string but matches a full path.""" tree.add_tenants(["tenant_1", "tenant_2"], 0) tree.insert("helloworld", "tenant_1", 1) tree.insert("hellothere", "tenant_2", 2) matched_text, matched_tenants = tree.prefix_match("hellothereextra") assert matched_text == "hellothere" assert matched_tenants == ["tenant_2"] def test_prefix_match_exact_match(self, tree: PrefixTree) -> None: """Test prefix_match with an exact match for a single tenant.""" tree.add_tenants(["tenant_1"], 0) tree.insert("hello", "tenant_1", 1) matched_text, matched_tenants = tree.prefix_match("hello") assert matched_text == "hello" assert matched_tenants == ["tenant_1"] def test_prefix_match_partial_match(self, tree: PrefixTree) -> None: """Test prefix_match with a partial query matching the longest common part of a branch.""" tree.add_tenants(["tenant_1", "tenant_2"], 0) tree.insert("apple", "tenant_1", 1) tree.insert("apricot", "tenant_2", 2) matched_text, matched_tenants = tree.prefix_match("application") assert matched_text == "appl" # Longest of ("appl", "ap") assert matched_tenants == ["tenant_1"] def 
test_prefix_match_with_tenant_filter(self, tree: PrefixTree) -> None: """Test prefix_match with a tenant filter selecting a specific branch.""" tree.add_tenants(["tenant_1", "tenant_2"], 0) tree.insert("apple", "tenant_1", 1) tree.insert("apricot", "tenant_2", 2) matched_text, matched_tenants = tree.prefix_match("application", ["tenant_2"]) assert matched_text == "ap" assert matched_tenants == ["tenant_2"] def test_prefix_match_with_shared_prefix_tenant_filter( self, tree: PrefixTree ) -> None: """Test prefix_match with a tenant filter when one tenant has a prefix of a longer string.""" tree.add_tenants(["tenant_1", "tenant_2"], 0) tree.insert("apple", "tenant_1", 1) tree.insert("applepie", "tenant_2", 2) # Match the longer string but only allow tenant_1 matched_text, matched_tenants = tree.prefix_match("applepie", ["tenant_1"]) # Should only match up to "apple" as that's what tenant_1 owns assert matched_text == "apple" assert matched_tenants == ["tenant_1"] # Verify that using both tenants would match the full string for tenant_2 only matched_text, matched_tenants = tree.prefix_match( "applepie", ["tenant_1", "tenant_2"] ) assert matched_text == "applepie" assert matched_tenants == ["tenant_2"] # And both tenants should be returned for "apple" matched_text, matched_tenants = tree.prefix_match( "apple", ["tenant_1", "tenant_2"] ) assert matched_text == "apple" assert set(matched_tenants) == {"tenant_1", "tenant_2"} def test_prefix_match_with_non_existent_tenant_filter( self, tree: PrefixTree ) -> None: """Test prefix_match with a filter for a non-existent tenant returns no match.""" tree.add_tenants(["tenant_1"], 0) tree.insert("apple", "tenant_1", 1) matched_text, matched_tenants = tree.prefix_match( "application", ["non_existent_tenant"] ) assert matched_text == "" assert matched_tenants is None
TestPrefixTreeMatch
python
apache__airflow
providers/apache/kylin/src/airflow/providers/apache/kylin/hooks/kylin.py
{ "start": 971, "end": 2822 }
class ____(BaseHook): """ Interact with Kylin to run CubeSource commands and get job status. :param kylin_conn_id: The connection id as configured in Airflow administration. :param project: project name :param dsn: dsn """ conn_name_attr = "kylin_conn_id" default_conn_name = "kylin_default" conn_type = "kylin" hook_name = "Apache Kylin" def __init__( self, kylin_conn_id: str = default_conn_name, project: str | None = None, dsn: str | None = None, ): super().__init__() self.kylin_conn_id = kylin_conn_id self.project = project self.dsn = dsn def get_conn(self): conn = self.get_connection(self.kylin_conn_id) if self.dsn: return kylinpy.create_kylin(self.dsn) self.project = self.project or conn.schema return kylinpy.Kylin( conn.host, username=conn.login, password=conn.password, port=conn.port, project=self.project, **conn.extra_dejson, ) def cube_run(self, datasource_name, op, **op_args): """ Run CubeSource command which in CubeSource.support_invoke_command. :param datasource_name: :param op: command :param op_args: command args :return: response """ cube_source = self.get_conn().get_datasource(datasource_name) try: return cube_source.invoke_command(op, **op_args) except exceptions.KylinError as err: raise AirflowException(f"Cube operation {op} error , Message: {err}") def get_job_status(self, job_id): """ Get job status. :param job_id: kylin job id :return: job status """ return self.get_conn().get_job(job_id).status
KylinHook
python
getsentry__sentry
tests/sentry/preprod/api/endpoints/test_project_preprod_artifact_update.py
{ "start": 17140, "end": 18416 }
class ____(TestCase): def test_exact_version_matching_prevents_incorrect_matches(self): package = "com.hackernews" version = "1.2.3" self.create_release(project=self.project, version=f"{package}@{version}333333") self.create_release(project=self.project, version=f"{package}@{version}.0") self.create_release(project=self.project, version=f"{package}@{version}-beta") result = find_or_create_release(self.project, package, version) assert result is not None assert result.version == f"{package}@{version}" def test_finds_existing_release_regardless_of_build_number(self): package = "com.example.app" version = "2.1.0" existing_release = self.create_release( project=self.project, version=f"{package}@{version}+456" ) result = find_or_create_release(self.project, package, version) assert result is not None assert result.id == existing_release.id result_with_build = find_or_create_release(self.project, package, version, 789) assert result_with_build is not None assert result_with_build.id == existing_release.id assert result_with_build.version == f"{package}@{version}+456"
FindOrCreateReleaseTest
python
chroma-core__chroma
chromadb/config.py
{ "start": 3983, "end": 4597 }
class ____(Enum): """ Routing mode for the segment directory node - Assign based on the node name, used in production with multi-node settings with the assumption that there is one query service pod per node. This is useful for when there is a disk based cache on the node that we want to route to. id - Assign based on the member id, used in development and testing environments where the node name is not guaranteed to be unique. (I.e a local development kubernetes env). Or when there are multiple query service pods per node. """ NODE = "node" ID = "id"
RoutingMode
python
kennethreitz__tablib
tests/test_tablib.py
{ "start": 30592, "end": 31662 }
class ____(BaseTestCase): def test_xls_format_detect(self): """Test the XLS format detection.""" in_stream = self.founders.xls self.assertEqual(detect_format(in_stream), 'xls') def test_xls_date_import(self): xls_source = Path(__file__).parent / 'files' / 'dates.xls' with open(str(xls_source), mode='rb') as fh: dset = tablib.Dataset().load(fh, 'xls') self.assertEqual(dset.dict[0]['birth_date'], datetime.datetime(2015, 4, 12, 0, 0)) def test_xls_import_with_errors(self): """Errors from imported files are kept as errors.""" xls_source = Path(__file__).parent / 'files' / 'errors.xls' with xls_source.open('rb') as fh: data = tablib.Dataset().load(fh.read()) self.assertEqual( data.dict[0], OrderedDict([ ('div by 0', '#DIV/0!'), ('name unknown', '#NAME?'), ('not available (formula)', '#N/A'), ('not available (static)', '#N/A') ]) )
XLSTests
python
pytorch__pytorch
torch/_inductor/compile_fx.py
{ "start": 42617, "end": 86323 }
class ____(FxCompile): @override def codegen_and_compile( self, gm: GraphModule, example_inputs: Sequence[InputType], inputs_to_check: Sequence[int], graph_kwargs: _CompileFxKwargs, ) -> OutputCode: """ Generates the OutputCode from the GraphModule and example_inputs. """ # Sorry about the mess, we need graph_kwargs to continue to be able # to propagate it further on # TODO: _CompileFxKwargs actually has stronger types than in the # signature, need to tighten it up assert "cudagraphs" in graph_kwargs and graph_kwargs["cudagraphs"] is not None cudagraphs: BoxedBool = graph_kwargs["cudagraphs"] static_input_idxs: Sequence[int] = graph_kwargs.get("static_input_idxs", ()) is_backward: bool = graph_kwargs.get("is_backward", False) graph_id: Optional[int] = graph_kwargs.get("graph_id", None) cpp_wrapper: bool = graph_kwargs.get("cpp_wrapper", False) fx_wrapper: bool = graph_kwargs.get("fx_wrapper", False) aot_mode: bool = V.aot_compilation is_inference: bool = graph_kwargs.get("is_inference", False) extern_node_serializer: Optional[Callable[[list[ExternKernelNode]], Any]] = ( graph_kwargs.get("extern_node_serializer", None) ) with ( _WaitCounter("pytorch.wait_counter.actual_codegen_and_compile").guard(), dynamo_utils.preserve_rng_state(), ): if (sleep_sec := config.sleep_sec_TESTING_ONLY) is not None: import time log.warning( "Sleeping for %s since sleep_sec_TESTING_ONLY is set", sleep_sec ) time.sleep(sleep_sec) if is_tf32_warning_applicable(gm): _warn_tf32_disabled() inductor_counters = counters["inductor"].copy() # lift the maximum depth of the Python interpreter stack # to adapt large/deep models sys.setrecursionlimit(max(sys.getrecursionlimit(), 2000)) _step_logger()( logging.INFO, "torchinductor compiling " f"{'BACKWARDS' if is_backward else 'FORWARDS'} " f"graph {graph_id}", ) fd = io.StringIO() torch._dynamo.repro.after_aot.save_graph_repro( fd, gm, example_inputs, "inductor", save_dir=None ) runnable_graph_str = fd.getvalue() trace_structured( "artifact", 
metadata_fn=lambda: { "name": "fx_graph_runnable", "encoding": "string", }, payload_fn=lambda: runnable_graph_str, ) V.debug.fx_graph(gm, example_inputs) # TODO: Should we actually dump this? It should be redundant with the aot # structured logs... # trace_structured("inductor_input_graph", payload_fn=lambda: gm.print_readable(print_output=False)) shape_env = gm.shape_env if shape_env is None: shape_env = shape_env_from_inputs(example_inputs) # Convert view to reshape in the graph. This is necessary primarily for # layout optimization. Do it unconditionally for uniformity. # # It's needed because when we do layout optimization, an contiguous tensor # in eager mode may becomes a channels last tensor. A view op previously # can be applied to the contiguous tensor may not be able to be applied # on the channels tensor any more. An error like # RuntimeError: view size is not compatible with input tensor's size and stride # (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead. # will be printed. # # Replace view op to reshape op in this case. # As an example, timm_resnest/botnet26t_256/convnext_base etc. will fail if we don't do this. # # Also this has to be done before FakeTensorProp below to avoid the failed # .view() call. view_to_reshape(gm) with dynamo_timed( "additional_fake_tensor_prop", log_pt2_compile_event=True ): # It is safe to run FakeTensorProp under no_grad because by the time # we're in inductor, we assume that AOTAutograd has already "taken care" # of autograd, so there should be no more autograd-related API's in the # graph. with torch.no_grad(): fake_mode = fake_tensor_prop(gm, example_inputs) _recursive_record_original_output_strides(gm) # pattern matcher passes might not preserve striding information # on node.meta["val"]. if in the future we rely on these being # correct we will need to fix. 
trace_structured( "artifact", metadata_fn=lambda: { "name": "before_post_grad_graph", "encoding": "string", }, payload_fn=lambda: gm.print_readable( print_output=False, include_stride=True, include_device=True ), ) with V.set_fake_mode(fake_mode): # has some issues with memory in training cuda_context = get_cuda_device_context(gm) with cuda_context: _recursive_post_grad_passes(gm, is_inference=is_inference) V.debug.fx_graph_transformed(gm, example_inputs) post_grad_graphs_log.debug( "%s", lazy_format_graph_code( "AFTER POST GRAD", gm, include_stride=True, include_device=True, colored=True, ), ) # We're printing the graph to be used as a cache key - so a # printer which is a little less readable but faster is # appropriate. inductor_post_grad_graph_str = gm.print_readable( print_output=False, include_stride=True, include_device=True, fast_sympy_print=True, ) # "inductor_post_grad_graph" is used in inductor provenance # tracking highlighter front-end. trace_structured( "artifact", metadata_fn=lambda: { "name": "inductor_post_grad_graph", "encoding": "string", }, payload_fn=lambda: inductor_post_grad_graph_str, ) if config.trace.provenance_tracking_level != 0: provenance_tracking_json = ( torch.fx.traceback.get_graph_provenance_json(gm.graph) ) torch._inductor.debug._inductor_post_to_pre_grad_nodes = ( create_mapping_pre_post_grad_nodes( torch._inductor.debug._pre_grad_graph_id, provenance_tracking_json, ) ) metrics_context = get_metrics_context() if metrics_context.in_progress(): num_graph_breaks = counters["graph_break"].total() CompileEventLogger.compilation_metric( overwrite=True, num_graph_breaks=num_graph_breaks ) if config.is_fbcode(): try: log_optimus_to_scuba( extra_logging={ "pt2_configs": str(get_patched_config_dict()) } ) except Exception: # TODO(T216453900): need to work around for now to support vllm # See details in vllm/compilation/pass_manager.py. 
log.warning("failed to log pt2_configs") with ( V.set_fake_mode(fake_mode), maybe_disable_comprehensive_padding(example_inputs), maybe_disable_graph_partition(cpp_wrapper, aot_mode), ): const_output_index = None const_graph = None const_wrapper_code = None const_kernel_code = None if aot_mode and config.aot_inductor.use_runtime_constant_folding: # torchbind objects have name that starts with _torchbind_obj # See caffe2/torch/fx/_symbolic_trace.py?lines=406 const_gm, const_output_index = split_const_gm( gm, skip_folding_node_fn=lambda node: node.op == "get_attr" and isinstance(node.target, str) and ( node.target.startswith("_torchbind_obj") or isinstance(node.meta.get("val", None), FakeScriptObject) ), ) const_graph = GraphLowering( const_gm, example_inputs=[], shape_env=shape_env, graph_id=graph_id, cpp_wrapper=cpp_wrapper, aot_mode=aot_mode, extern_node_serializer=extern_node_serializer, is_inference=is_inference, is_backward=is_backward, is_const_graph=True, fx_wrapper=fx_wrapper, ) with ( V.set_graph_handler(const_graph), V.set_extern_kernel_nodes([]), ): assert cpp_wrapper, "AOT mode only supports C++ wrapper" const_graph.run() const_wrapper_code, const_kernel_code = ( const_graph.codegen_with_cpp_wrapper() ) graph = GraphLowering( gm, # example_inputs will be used by AOTInductor to dry-run the generated code for Triton kernel tuning. # For the forward pass, we have the real inputs to be used as example_inputs. For the backward pass, # we currently use fake tensors and defake them later. 
example_inputs=example_inputs, shape_env=shape_env, graph_id=graph_id, cpp_wrapper=cpp_wrapper, aot_mode=aot_mode, extern_node_serializer=extern_node_serializer, is_inference=is_inference, is_backward=is_backward, const_output_index=const_output_index, const_wrapper_code=( const_wrapper_code.value if const_wrapper_code else None ), const_kernel_code=( const_kernel_code.value if const_kernel_code else None ), const_module=const_graph, inputs_to_check=inputs_to_check, fx_wrapper=fx_wrapper, ) metrics_helper = metrics.CachedMetricsHelper() # We are going to start code generating runtime asserts, so make sure # you don't start adding new ones in the lowering process graph.freeze_runtime_asserts() with ( V.set_graph_handler(graph), V.set_extern_kernel_nodes([]), distributed_autotune.graph_context(), ): graph.run(*example_inputs) output_strides: list[Optional[tuple[_StrideExprStr, ...]]] = [] if graph.graph_outputs is not None: # We'll put the output strides in the compiled graph so we # can later return them to the caller via TracingContext p = SymExprPrinter() for out in graph.graph_outputs: if ( isinstance(out, IRNode) and out.has_tensor_output() and len(free_unbacked_symbols(out.get_stride())) == 0 ): # Convert to string for eval on the load path output_strides.append( tuple(p.doprint(s) for s in out.get_layout().stride) ) else: output_strides.append(None) _check_triton_bf16_support(graph) # TODO: The switching between AOT mode and not here is a bit # messy, but it's localized to the block of code below so I'm # not going to touch it for now compiled_fn: Any compiled_fn_runner = None with dynamo_timed( "GraphLowering.compile_to_fn", log_pt2_compile_event=True ): if graph.aot_mode and graph.fx_wrapper: assert not graph.cpp_wrapper compiled_fn = graph.codegen()[0].gm # type: ignore[attr-defined] output_code_log.debug( "Output graph module: \n%s", compiled_fn.print_readable(print_output=False), ) elif graph.aot_mode: from .codecache import AotCodeCompiler assert 
graph.cpp_wrapper, ( "AOT mode only supports C++ wrapper" ) wrapper_code, kernel_code = graph.codegen_with_cpp_wrapper() output_code_log.debug( "Output wrapper code: \n%s", wrapper_code.value ) if kernel_code.value: output_code_log.debug( "Output kernel code:\n%s", kernel_code.value ) serialized_extern_kernel_nodes = None if V.extern_kernel_nodes: serialized_extern_kernel_nodes = ( graph.extern_node_serializer(V.extern_kernel_nodes) ) output_code_log.debug( "Serialized Extern Kernel Nodes: \n%s", serialized_extern_kernel_nodes, ) with dynamo_timed( "AotCodeCompiler.compile", log_pt2_compile_event=True ): # Directly return the file path with the compiled code compiled_fn = AotCodeCompiler.compile( graph, wrapper_code.value, kernel_code.value, serialized_extern_kernel_nodes, device_type=graph.device_type, additional_files=[ *dict.fromkeys( graph.wrapper_code.additional_files + ( const_graph.wrapper_code.additional_files if const_graph else [] ) ) ], ) else: compiled_module = graph.compile_to_module() compiled_fn = compiled_module.call compiled_fn_runner = getattr( compiled_module, "runner", None ) # Dump provenance artifacts for debugging trace inductor_provenance_tracking_node_mappings = None inductor_kernel_stack_trace_str = None if config.trace.provenance_tracking_level != 0: inductor_provenance_tracking_node_mappings = json.dumps( torch._inductor.debug.dump_inductor_provenance_info() ) inductor_kernel_stack_trace_str = json.dumps( torch._inductor.debug._inductor_kernel_stack_trace ) trace_structured( "artifact", metadata_fn=lambda: { "name": "inductor_provenance_tracking_node_mappings", "encoding": "json", }, payload_fn=lambda: inductor_provenance_tracking_node_mappings, ) trace_structured( "artifact", metadata_fn=lambda: { "name": "inductor_provenance_tracking_kernel_stack_traces", "encoding": "json", }, payload_fn=lambda: inductor_kernel_stack_trace_str, ) if inductor_kernel_stack_trace_str: metrics_context = get_metrics_context() if 
metrics_context.in_progress(): metrics_context.add_to_set( "inductor_provenance", inductor_kernel_stack_trace_str, ) node_runtimes = None if inductor_metrics_log.isEnabledFor(logging.INFO): num_bytes, nodes_num_elem, node_runtimes = graph.count_bytes() # pyrefly: ignore [bad-assignment] metrics.num_bytes_accessed += num_bytes metrics.node_runtimes += node_runtimes metrics.nodes_num_elem += nodes_num_elem inductor_metrics_log.info( "Graph Metrics:\n%s", { "num_bytes_accessed": num_bytes, "nodes_num_elem": nodes_num_elem, "node_runtimes": node_runtimes, }, ) # Collect and dump op runtimes and tensor metadata for TLParse if config.log_tlparse: _, _, node_runtimes = graph.count_bytes() torch._inductor.debug.log_runtime_and_tensor_meta(node_runtimes) # Collect and dump collective-op schedule for external diagnostics torch._inductor.debug.log_collective_schedule(graph.scheduler.nodes) if ( cudagraphs and config.triton.cudagraph_skip_dynamic_graphs and not V.graph.disable_cudagraphs_reason and torch._inductor.utils.any_is_symbolic(*example_inputs) ): stack_trace = None for node in gm.graph.nodes: meta_val = node.meta.get("val", None) if ( node.op == "placeholder" or not isinstance(meta_val, torch.Tensor) or not torch._inductor.utils.any_is_symbolic(meta_val) ): continue if stack_trace := node.meta.get("stack_trace", None): break disable = "graph with symbolic shapes inputs and config.triton.cudagraph_skip_dynamic_graphs=True." 
if stack_trace: disable = f"{disable} Found from {stack_trace}\n" else: disable = f"{disable}\n" # pyrefly: ignore [unbound-name] V.graph.disable_cudagraphs_reason = disable # pyrefly: ignore [unbound-name] if cudagraphs and not V.graph.disable_cudagraphs_reason: maybe_incompat_node = get_first_incompatible_cudagraph_node(gm) if maybe_incompat_node: disable = f"disabling cudagraphs due to incompatible op {maybe_incompat_node.target}" if stack_trace := maybe_incompat_node.meta.get( "stack_trace", None ): disable = f"{disable} Found from {stack_trace}\n" # pyrefly: ignore [unbound-name] V.graph.disable_cudagraphs_reason = disable # pyrefly: ignore [unbound-name] if V.aot_compilation: assert isinstance( compiled_fn, # pyrefly: ignore [unbound-name] (str, list, torch.fx.GraphModule), ), type(compiled_fn) return CompiledAOTI( filename=compiled_fn, device_type=graph.device_type ) # TODO: Hoist this above V.aot_compilation # pyrefly: ignore [unbound-name] if cudagraphs and not V.graph.disable_cudagraphs_reason: from torch._inductor.cudagraph_utils import ( check_lowering_disable_cudagraph, ) # pyrefly: ignore [unbound-name] V.graph.disable_cudagraphs_reason = ( check_lowering_disable_cudagraph( # pyrefly: ignore [unbound-name] V.graph.device_node_mapping ) ) self._compile_stats[type(self)].codegen_and_compile += 1 if ( # pyrefly: ignore [unbound-name] torch._inductor.debug.RECORD_GRAPH_EXECUTION # pyrefly: ignore [unbound-name] and torch._inductor.debug.GRAPH_COMPILE_IDS is not None ): compile_id = str( # pyrefly: ignore [unbound-name] torch._guards.CompileContext.current_compile_id() ) graph_id = graph_kwargs.get("graph_id") if graph_id is not None: # pyrefly: ignore [unbound-name] torch._inductor.debug.GRAPH_COMPILE_IDS[graph_id] = ( compile_id ) return CompiledFxGraph( # pyrefly: ignore [bad-argument-type] compiled_fn, graph, gm, output_strides, # pyrefly: ignore [unbound-name] V.graph.disable_cudagraphs_reason, metrics_helper.get_deltas(), counters["inductor"] - 
inductor_counters, cudagraphs, example_inputs, static_input_idxs, graph_kwargs, inputs_to_check, runnable_graph_str, inductor_post_grad_graph_str, compiled_fn_runner, inductor_provenance_tracking_node_mappings, inductor_kernel_stack_trace_str, ) def fx_codegen_and_compile( gm: GraphModule, example_inputs: Sequence[InputType], # This is derivable from the other inputs to this function, but we pass it # in explicitly because it's nontrivial to compute inputs_to_check: Sequence[int], **graph_kwargs: Unpack[_CompileFxKwargs], ) -> OutputCode: scheme: FxCompile if fx_compile_mode == FxCompileMode.NORMAL: scheme = _InProcessFxCompile() elif fx_compile_mode == FxCompileMode.SERIALIZE: from .compile_fx_ext import _DebugSerdeFxCompile scheme = _DebugSerdeFxCompile() elif fx_compile_mode == FxCompileMode.SUBPROCESS: from .compile_fx_subproc import _SubprocessFxCompile scheme = _SubprocessFxCompile() if fx_compile_async: from .compile_fx_async import _AsyncFxCompile from .compile_fx_ext import _OutOfProcessFxCompile # pyrefly: ignore [unbound-name] assert isinstance(scheme, _OutOfProcessFxCompile), ( "async is only valid with an out-of-process compile mode" ) # pyrefly: ignore [unbound-name] scheme = _AsyncFxCompile(scheme) if fx_compile_progressive: from .compile_fx_async import _ProgressiveFxCompile from .compile_fx_ext import _OutOfProcessFxCompile # pyrefly: ignore [unbound-name] assert isinstance(scheme, _OutOfProcessFxCompile), ( "progressive is only valid with an out-of-process compile mode" ) progression_configs = _get_progression_configs() # Use in-process compile for the fast version fast_scheme = _InProcessFxCompile() # pyrefly: ignore [unbound-name] scheme = _ProgressiveFxCompile(fast_scheme, scheme, progression_configs) # pyrefly: ignore [unbound-name] return scheme.codegen_and_compile(gm, example_inputs, inputs_to_check, graph_kwargs) def get_input_idxs_to_check( inputs: Sequence[InputType], static_input_idxs: Sequence[int], ) -> Sequence[int]: """ This function 
runs at compile time, and generates a list of indices for which we might need to do a copy to preserve alignment requirements. """ ids_to_check = [] for i, input in enumerate(inputs): if not isinstance(input, torch.Tensor): # non-tensors don't need alignment continue if not is_gpu(input.device.type): # right now we only care for gpu tensors continue with maybe_get_suppress_shape_guards_ctx(): # suppress guards so that tensor_is_aligned and should_assume_input_aligned # do not add guards on input's storage offset if i in static_input_idxs and tensor_is_aligned(input): continue if not should_assume_input_aligned(input): continue # if we get here, then # (a) our triton code assumes that the input is aligned # (b) we can't be sure ahead of time that the input will actually be aligned. # therefore, at runtime, we'll need to check that the input is aligned # (and if not, clone it to make it aligned.) ids_to_check.append(i) return ids_to_check def cudagraphify( model: Callable[..., Any], static_input_idxs: Sequence[int] = (), *, device_index: int, stack_traces: list[Optional[str]], is_backward: bool, is_inference: bool, constants: tuple[torch.Tensor, ...] = (), placeholders: Sequence[PlaceholderInfo] = (), mutated_input_idxs: tuple[int, ...] 
= (), ) -> Callable[..., Any]: from torch._inductor.cudagraph_trees import ( cudagraphify_impl as new_cudagraphify_impl, ) cudagraphify_fn: Callable[..., Any] if config.triton.cudagraph_trees: cudagraphify_fn = functools.partial( new_cudagraphify_impl, device_index=device_index, stack_traces=stack_traces, is_backward=is_backward, is_inference=is_inference, constants=constants, placeholders=placeholders, mutated_input_idxs=mutated_input_idxs, compile_id=torch._guards.CompileContext.current_compile_id(), ) else: cudagraphify_fn = cudagraphify_impl compiled_fn = None def run(new_inputs: Sequence[InputType]) -> Any: nonlocal compiled_fn if compiled_fn is None: with dynamo_utils.preserve_rng_state(): compiled_fn = cudagraphify_fn(model, new_inputs, static_input_idxs) # type: ignore[arg-type] return compiled_fn(new_inputs) # type: ignore[arg-type] return run def static_input(x: torch.Tensor) -> torch.Tensor: """ Copy and input while preserving strides """ return torch.empty_strided(x.size(), x.stride(), dtype=x.dtype, device=x.device) def index_expanded_dims_and_copy_( dst: torch.Tensor, src: torch.Tensor, expanded_dims: list[int], ) -> None: "Index into expanded dimensions of both dst and src then copy_" dst = index_expanded_dims(dst, expanded_dims) src = index_expanded_dims(src, expanded_dims) dst.copy_(src) def cudagraphify_impl( model: Callable[..., Any], inputs: list[torch.Tensor], static_input_idxs: Sequence[int] = (), ) -> Callable[[list[InputType]], Any]: """ Assumes inputs[static_input_idxs[i]] are always the same memory address """ check_input_idxs = get_input_idxs_to_check(inputs, static_input_idxs) # type: ignore[arg-type] # pyrefly: ignore [annotation-mismatch] static_input_idxs: OrderedSet[int] = OrderedSet( remove_unaligned_input_idxs(inputs, static_input_idxs) # type: ignore[arg-type] ) copy_misaligned_inputs(inputs, check_input_idxs) # type: ignore[arg-type] assert isinstance(inputs, list) inps_expanded_dims = [ get_expanded_dims(x) if idx not in 
static_input_idxs else [] for idx, x in enumerate(inputs) ] # allocate static tensor inputs static_inputs = [ ( x if not isinstance(x, torch.Tensor) else static_input(x) if idx not in static_input_idxs else x.detach() ) for idx, x in enumerate(inputs) ] # copy over input values for fresh allocations for idx, (x, expanded_dims) in enumerate(zip(inputs, inps_expanded_dims)): if isinstance(x, torch.Tensor) and idx not in static_input_idxs: index_expanded_dims_and_copy_(static_inputs[idx], x, expanded_dims) # warmup torch.cuda.synchronize() stream = torch.cuda.Stream() stream.wait_stream(torch.cuda.current_stream()) # copy static_inputs because it will be cleared in model with torch.cuda.stream(stream): model(list(static_inputs)) stream.synchronize() torch.cuda.current_stream().wait_stream(stream) torch.cuda.synchronize() # record graph = torch.cuda.CUDAGraph() with torch.cuda.graph(graph, stream=stream, capture_error_mode="thread_local"): static_outputs = model(list(static_inputs)) if not isinstance(static_outputs, (list, tuple)): static_outputs = (static_outputs,) if config.size_asserts: def run(new_inputs: list[InputType]) -> Callable[[list[InputType]], Any]: assert len(static_inputs) == len(new_inputs) for idx, (dst, src, expanded_dims) in enumerate( zip(static_inputs, new_inputs, inps_expanded_dims) ): if not isinstance(dst, torch.Tensor): continue assert isinstance(src, torch.Tensor) if idx in static_input_idxs: assert dst.data_ptr() == src.data_ptr() else: # TODO - could make one single op of multiple slices # and avoid dispatch. 
# Could also pre-index the `dst` tensors index_expanded_dims_and_copy_(dst, src, expanded_dims) new_inputs.clear() graph.replay() # pyrefly: ignore [bad-return] return static_outputs else: copy_indices = [ idx for idx in range(len(static_inputs)) if idx not in static_input_idxs ] def run(new_inputs: list[InputType]) -> Callable[[list[InputType]], Any]: for idx in copy_indices: expanded_dims = inps_expanded_dims[idx] src = new_inputs[idx] assert isinstance(src, torch.Tensor) index_expanded_dims_and_copy_(static_inputs[idx], src, expanded_dims) new_inputs.clear() graph.replay() # pyrefly: ignore [bad-return] return static_outputs return align_inputs_from_check_idxs(run, check_input_idxs, OrderedSet()) def compile_fx_aot( model_: GraphModule, example_inputs_: list[InputType], inner_compile: _CompileFxCallable = compile_fx_inner, config_patches: Optional[dict[str, Any]] = None, ) -> Union[list[Union[str, Weights]], str, GraphModule]: assert isinstance(model_, GraphModule), model_ # [See NOTE] Unwrapping subclasses AOT unwrap_tensor_subclass_parameters(model_) # pyrefly: ignore [annotation-mismatch] config_patches: dict[str, Any] = copy.deepcopy(config_patches or {}) if not (config_patches.get("fx_wrapper", False) or config.fx_wrapper): # If fx_wrapper is not set, then set cpp_wrapper config_patches["cpp_wrapper"] = True output_path = config_patches.get( "aot_inductor.output_path", config.aot_inductor.output_path ) if output_path: assert not output_path.endswith(".pt2"), ( "The output path for aot_compile should not have an extension with .pt2 " "this is for specifying the output path for the .so in AOTInductor. " "If you would like to package the AOTInductor generated files " "into a pt2, please call `torch._inductor.aoti_compile_and_package`." 
) else: config_patches = { **config_patches, "aot_inductor.output_path": code_hash(model_.code), } from .utils import maybe_aoti_standalone_config config_patches = maybe_aoti_standalone_config(config_patches) extern_node_serializer = config_patches.pop("extern_node_serializer", None) saved_compile_id = model_.meta.get("dynamo_compile_id", None) saved_compile_context = torch._guards.CompileContext(saved_compile_id) with ( V.set_aot_compilation(True), torch._guards.compile_context(saved_compile_context), chromium_event_timed( "compile_fx_aot", log_pt2_compile_event=True, reset_event_log_on_exit=True, ), get_metrics_context(), ): compiled_artifacts = compile_fx( model_, example_inputs_, inner_compile=functools.partial( inner_compile, extern_node_serializer=extern_node_serializer, ), config_patches=config_patches, ) assert isinstance(compiled_artifacts, CompiledAOTI) return compiled_artifacts.filename _graph_counter = count(0) def fw_compiler_freezing( aot_autograd_model: GraphModule, aot_example_inputs: Sequence[InputType], dynamo_model: GraphModule, num_example_inputs: int, inner_compile: Callable[..., Any], cudagraphs: BoxedBool, graph_id: int, forward_device: BoxedDeviceIndex, ) -> Callable[[list[object]], Sequence[torch.Tensor]]: from torch._inductor.freezing import convert_conv_weights_to_channels_last, freeze # partition_fn won't be called _recursive_joint_graph_passes(aot_autograd_model) layout_opt = GraphLowering.decide_layout_opt(aot_autograd_model, is_inference=True) if layout_opt: # make sure meta['val'] is properly setup fake_tensor_prop(aot_autograd_model, aot_example_inputs, True) convert_conv_weights_to_channels_last(aot_autograd_model) opt_model, preserved_arg_indices = freeze( dynamo_model, aot_autograd_model, aot_example_inputs, # type: ignore[arg-type] ) aot_example_inputs = [aot_example_inputs[ind] for ind in preserved_arg_indices] fake_mode = detect_fake_mode(aot_example_inputs) # for freezing, all graph outputs should be user visible *_, 
model_outputs_node = opt_model.graph.nodes model_outputs = model_outputs_node.args[0] model_outputs_node.meta["user_visible_output_idxs"] = [ idx for idx, n in enumerate(model_outputs) if isinstance(n, torch.fx.Node) ] static_input_idxs: list[Any] = [] # constant params will be real tensors, not fake tracing_context = torch._guards.TracingContext.try_get() unwrapped_args_offsets = [0] max_offset_idx = 0 if tracing_context is not None: assert tracing_context.params_flat_unwrap_subclasses is not None params_flat_unwrap = tracing_context.params_flat_unwrap_subclasses max_offset_idx = max(0, len(params_flat_unwrap) - 1) preserved_indices_params_flat = OrderedSet[int]() unwrapped_idxs = tracing_context.params_unwrapped_to_flat_index assert unwrapped_idxs is not None current_offset = 0 if len(params_flat_unwrap) > 0: unwrapped_args_offsets = [] for i in range(len(params_flat_unwrap)): if i not in preserved_arg_indices: params_flat_unwrap[i] = None if i > 0 and unwrapped_idxs[i] == unwrapped_idxs[i - 1]: current_offset += 1 else: preserved_indices_params_flat.add(unwrapped_idxs[i]) unwrapped_args_offsets.append(current_offset) # Deallocate wrapped params, if all subelements were deallocated assert tracing_context.params_flat is not None for i in range(len(tracing_context.params_flat)): if i not in preserved_indices_params_flat: tracing_context.params_flat[i] = None if tracing_context.fw_metadata: static_input_idxs = tracing_context.fw_metadata.static_input_indices with mock.patch.object(fake_mode, "allow_non_fake_inputs", True): optimized_function = inner_compile( opt_model, aot_example_inputs, static_input_idxs=static_input_idxs, cudagraphs=cudagraphs, graph_id=graph_id, is_inference=True, boxed_forward_device_index=forward_device, layout_opt=layout_opt, ) # aot_inductor codegens a call that takes in just the inputs, so we don't return a wrapper # that drops constant-ified params if V.aot_compilation: return optimized_function def wrapper(args: list[object]) -> 
Sequence[torch.Tensor]: args_new = [ args[i - unwrapped_args_offsets[min(i, max_offset_idx)]] for i in preserved_arg_indices ] args.clear() return optimized_function(args_new) wrapper._boxed_call = True # type: ignore[attr-defined] return wrapper def get_cpp_wrapper_config() -> dict[str, object]: if config.triton.cudagraphs: log_cudagraph_skip_and_bump_counter( format_default_skip_message("cpp wrapper enabled") ) return { # Set autotune_at_compile_time to True as default if the option is not explicitly set "triton.autotune_at_compile_time": ( config.triton.autotune_at_compile_time if config.triton.autotune_at_compile_time is not None else has_triton() ), "triton.autotune_cublasLt": False, "triton.cudagraphs": False, # TODO: to be removed "triton.store_cubin": True, } def get_cuda_device_context(gm: torch.fx.GraphModule) -> AbstractContextManager[None]: """ Returns a cuda device context manager if there is a single device in the graph """ if not torch.cuda.is_available(): return contextlib.nullcontext() cuda_devices: OrderedSet[torch.device] = OrderedSet( device for device in get_all_devices(gm) if device.type == "cuda" ) return ( torch.cuda.device(next(iter(cuda_devices))) # type: ignore[return-value] if len(cuda_devices) == 1 else contextlib.nullcontext() ) def partition_fn( gm: GraphModule, joint_inputs: Sequence[object], **kwargs: object, ) -> tuple[GraphModule, GraphModule]: cuda_context = get_cuda_device_context(gm) with cuda_context: # We can skip the invoke_subgraph because the # entire_partition_fn is called recursively for invoke_subgraph # in partitioning. 
_recursive_joint_graph_passes(gm, skip_invoke_subgraph=True) static_lifetime_input_indices: Optional[list[int]] = kwargs.pop( # type: ignore[assignment] "static_lifetime_input_indices", None ) if config.custom_partitioner_fn is None: with dynamo_utils.dynamo_timed( "min_cut_rematerialization_partition", log_pt2_compile_event=True ): return min_cut_rematerialization_partition( gm, joint_inputs, compiler="inductor", static_lifetime_input_indices=static_lifetime_input_indices, **kwargs, ) else: assert isinstance(config.custom_partitioner_fn, CustomPartitionerFn) with dynamo_utils.dynamo_timed( config.custom_partitioner_fn.__class__.__name__, log_pt2_compile_event=True, ): return config.custom_partitioner_fn( gm, joint_inputs, compiler="inductor", static_lifetime_input_indices=static_lifetime_input_indices, **kwargs, ) def get_num_model_outputs(model: GraphModule) -> int: model_outputs_node = output_node(model) model_outputs = pytree.arg_tree_leaves(*model_outputs_node.args) return len(model_outputs) @dataclass(frozen=True)
_InProcessFxCompile