language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
airbytehq__airbyte
airbyte-ci/connectors/metadata_service/lib/tests/test_specs_secrets_mask.py
{ "start": 3078, "end": 11547 }
class ____: """Tests for _get_specs_secrets_from_registry_entries function.""" @pytest.fixture def mock_registry_entry(self): """Create a mock registry entry.""" return Mock(spec=PolymorphicRegistryEntry) @pytest.fixture def single_secret_entry_data(self): """Sample entry data with a single secret property.""" return { "spec": { "connectionSpecification": { "properties": { "password": {"type": "string", "airbyte_secret": True}, "username": {"type": "string", "airbyte_secret": False}, } } } } @pytest.fixture def multiple_secrets_entry_data(self): """Sample entry data with multiple secret properties.""" return { "spec": { "connectionSpecification": { "properties": { "password": {"type": "string", "airbyte_secret": True}, "api_key": {"type": "string", "airbyte_secret": True}, "username": {"type": "string", "airbyte_secret": False}, } } } } @pytest.fixture def nested_secrets_entry_data(self): """Sample entry data with nested secret properties.""" return { "spec": { "connectionSpecification": { "properties": { "oauth": { "type": "object", "properties": { "client_secret": {"type": "string", "airbyte_secret": True}, "client_id": {"type": "string", "airbyte_secret": False}, }, }, "username": {"type": "string", "airbyte_secret": False}, } } } } @pytest.fixture def deeply_nested_secrets_entry_data(self): """Sample entry data with deeply nested secret properties.""" return { "spec": { "connectionSpecification": { "properties": { "connection": { "type": "object", "properties": { "auth": { "type": "object", "properties": { "credentials": { "type": "object", "properties": {"secret_token": {"type": "string", "airbyte_secret": True}}, } }, } }, } } } } } @pytest.fixture def no_secrets_entry_data(self): """Sample entry data with no secret properties.""" return { "spec": { "connectionSpecification": { "properties": {"username": {"type": "string", "airbyte_secret": False}, "host": {"type": "string"}} } } } @pytest.mark.parametrize( "entry_data_fixture,expected_secrets,description", [ 
("single_secret_entry_data", {"password"}, "single secret property"), ("multiple_secrets_entry_data", {"password", "api_key"}, "multiple secret properties"), ("nested_secrets_entry_data", {"client_secret"}, "nested secret property"), ("deeply_nested_secrets_entry_data", {"secret_token"}, "deeply nested secret property"), ("no_secrets_entry_data", set(), "no secret properties"), ], ) def test_get_specs_secrets_valid_structures(self, mock_registry_entry, entry_data_fixture, expected_secrets, description, request): """Test extraction from various valid entry structures.""" entry_data = request.getfixturevalue(entry_data_fixture) with patch("metadata_service.specs_secrets_mask.to_json_sanitized_dict") as mock_sanitize: mock_sanitize.return_value = entry_data result = _get_specs_secrets_from_registry_entries([mock_registry_entry]) assert result == expected_secrets, f"Failed for {description}" mock_sanitize.assert_called_once_with(mock_registry_entry) def test_get_specs_secrets_multiple_entries_aggregation(self, mock_registry_entry, single_secret_entry_data, nested_secrets_entry_data): """Test that secrets from multiple entries are properly aggregated.""" entry1 = Mock(spec=PolymorphicRegistryEntry) entry2 = Mock(spec=PolymorphicRegistryEntry) with patch("metadata_service.specs_secrets_mask.to_json_sanitized_dict") as mock_sanitize: mock_sanitize.side_effect = [single_secret_entry_data, nested_secrets_entry_data] result = _get_specs_secrets_from_registry_entries([entry1, entry2]) assert result == {"password", "client_secret"} assert mock_sanitize.call_count == 2 def test_get_specs_secrets_duplicate_secrets_handling(self, single_secret_entry_data): """Test that duplicate secret names from different entries are handled correctly.""" entry1 = Mock(spec=PolymorphicRegistryEntry) entry2 = Mock(spec=PolymorphicRegistryEntry) with patch("metadata_service.specs_secrets_mask.to_json_sanitized_dict") as mock_sanitize: mock_sanitize.return_value = single_secret_entry_data result = 
_get_specs_secrets_from_registry_entries([entry1, entry2]) assert result == {"password"} assert mock_sanitize.call_count == 2 def test_get_specs_secrets_empty_entries_list(self): """Test behavior with empty entries list.""" result = _get_specs_secrets_from_registry_entries([]) assert result == set() def test_get_specs_secrets_complex_real_world_structure(self, mock_registry_entry): """Test with realistic connector specification structure.""" complex_entry_data = { "spec": { "connectionSpecification": { "properties": { "host": {"type": "string"}, "port": {"type": "integer"}, "database": {"type": "string"}, "credentials": { "type": "object", "oneOf": [ { "properties": { "auth_type": {"type": "string", "const": "username_password"}, "username": {"type": "string"}, "password": {"type": "string", "airbyte_secret": True}, } }, { "properties": { "auth_type": {"type": "string", "const": "oauth2"}, "client_id": {"type": "string"}, "client_secret": {"type": "string", "airbyte_secret": True}, "refresh_token": {"type": "string", "airbyte_secret": True}, } }, ], }, "ssl_config": { "type": "object", "properties": {"ssl_mode": {"type": "string"}, "client_key": {"type": "string", "airbyte_secret": True}}, }, } } } } with patch("metadata_service.specs_secrets_mask.to_json_sanitized_dict") as mock_sanitize: mock_sanitize.return_value = complex_entry_data result = _get_specs_secrets_from_registry_entries([mock_registry_entry]) expected_secrets = {"password", "client_secret", "refresh_token", "client_key"} assert result == expected_secrets
TestGetSpecsSecretsFromRegistryEntries
python
huggingface__transformers
src/transformers/models/dia/modular_dia.py
{ "start": 3532, "end": 4711 }
class ____(LlamaAttention): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: Union[DiaEncoderConfig, DiaDecoderConfig], layer_idx: int, is_causal: bool = False): nn.Module.__init__(self) self.config = config self.layer_idx = layer_idx self.hidden_size = config.hidden_size self.num_heads = self.config.num_attention_heads self.num_key_value_heads = self.config.num_key_value_heads or self.num_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.head_dim = getattr(config, "head_dim", config.hidden_size // self.num_heads) self.scaling = 1 self.attention_dropout = 0.0 self.is_causal = is_causal self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
DiaSelfAttention
python
kamyu104__LeetCode-Solutions
Python/decode-the-slanted-ciphertext.py
{ "start": 864, "end": 1333 }
class ____(object): def decodeCiphertext(self, encodedText, rows): """ :type encodedText: str :type rows: int :rtype: str """ cols = len(encodedText)//rows result = [] for i in xrange(cols): for j in xrange(i, len(encodedText), cols+1): result.append(encodedText[j]) while result and result[-1] == ' ': result.pop() return "".join(result)
Solution2
python
tox-dev__tox
src/tox/config/cli/parser.py
{ "start": 868, "end": 3952 }
class ____(ArgumentParser): """Argument parser which updates its defaults by checking the configuration files and environmental variables.""" def __init__(self, *args: Any, **kwargs: Any) -> None: # sub-parsers also construct an instance of the parser, but they don't get their own file config, but inherit self.file_config = kwargs.pop("file_config") if "file_config" in kwargs else IniConfig() kwargs["epilog"] = self.file_config.epilog super().__init__(*args, **kwargs) def fix_defaults(self) -> None: for action in self._actions: self.fix_default(action) def fix_default(self, action: Action) -> None: if hasattr(action, "default") and hasattr(action, "dest") and action.default != SUPPRESS: of_type = self.get_type(action) key = action.dest outcome = get_env_var(key, of_type=of_type) if outcome is None and self.file_config: outcome = self.file_config.get(key, of_type=of_type) if outcome is not None: action.default, default_value = outcome action.default_source = default_value # type: ignore[attr-defined] if isinstance(action, argparse._SubParsersAction): # noqa: SLF001 for values in action.choices.values(): if not isinstance(values, ToxParser): # pragma: no cover msg = "detected sub-parser added without using our own add command" raise RuntimeError(msg) # noqa: TRY004 values.fix_defaults() @staticmethod def get_type(action: Action) -> type[Any]: of_type: type[Any] | None = getattr(action, "of_type", None) if of_type is None: if isinstance(action, argparse._AppendAction): # noqa: SLF001 of_type = list[action.type] # type: ignore[name-defined] elif isinstance(action, argparse._StoreAction) and action.choices: # noqa: SLF001 loc = locals() loc["Literal"] = Literal as_literal = f"Literal[{', '.join(repr(i) for i in action.choices)}]" of_type = eval(as_literal, globals(), loc) # noqa: S307 elif action.default is not None: of_type = type(action.default) elif isinstance(action, argparse._StoreConstAction) and action.const is not None: # noqa: SLF001 of_type = 
type(action.const) else: raise TypeError(action) return of_type def parse_args( # type: ignore[override] # avoid defining all overloads self, args: Sequence[str] | None = None, namespace: Namespace | None = None, ) -> Namespace: res, argv = self.parse_known_args(args, namespace) if argv: self.error( f"unrecognized arguments: {' '.join(argv)}\n" "hint: if you tried to pass arguments to a command use -- to separate them from tox ones", ) return res
ArgumentParserWithEnvAndConfig
python
Textualize__textual
tests/command_palette/test_command_source_environment.py
{ "start": 231, "end": 577 }
class ____(Provider): environment: set[tuple[App, Screen, Widget | None]] = set() async def search(self, _: str) -> Hits: def goes_nowhere_does_nothing() -> None: pass SimpleSource.environment.add((self.app, self.screen, self.focused)) yield Hit(1, "Hit", goes_nowhere_does_nothing, "Hit")
SimpleSource
python
google__pytype
pytype/tests/test_typeis.py
{ "start": 72, "end": 2463 }
class ____(test_base.BaseTest): """Tests for typing.TypeIs.""" def test_positive_narrowing(self): self.Check(""" from typing_extensions import TypeIs def is_str(val: object) -> TypeIs[str]: return isinstance(val, str) def f(val: object): if is_str(val): assert_type(val, str) """) def test_negative_narrowing(self): self.Check(""" from typing import Union from typing_extensions import TypeIs def is_str(val: object) -> TypeIs[str]: return isinstance(val, str) def f(val: Union[int, str]): if is_str(val): assert_type(val, str) else: assert_type(val, int) """) def test_keep_more_specific_type(self): self.Check(""" from typing import Any, Sequence, Union from typing_extensions import TypeIs def is_sequence(val: object) -> TypeIs[Sequence[Any]]: return isinstance(val, Sequence) def f(val: Union[int, Sequence[int]]): if is_sequence(val): assert_type(val, Sequence[int]) else: assert_type(val, int) """) def test_check_compatibility(self): errors = self.CheckWithErrors(""" from typing_extensions import TypeIs def is_str(val: int) -> TypeIs[str]: # invalid-function-definition[e] return isinstance(val, str) """) self.assertErrorSequences(errors, {"e": ["TypeIs[str]", "input type int"]}) def test_pyi(self): with self.DepTree([( "foo.pyi", """ from typing_extensions import TypeIs def is_str(val: object) -> TypeIs[str]: ... """, )]): self.Check(""" import foo from typing import Union def f(val: object): if foo.is_str(val): assert_type(val, str) def g(val: Union[int, str]): if foo.is_str(val): assert_type(val, str) else: assert_type(val, int) """) def test_reingest(self): with self.DepTree([( "foo.py", """ from typing_extensions import TypeIs def is_str(val: object) -> TypeIs[str]: return isinstance(val, str) """, )]): self.Check(""" import foo def f(val: object): if foo.is_str(val): assert_type(val, str) """) if __name__ == "__main__": test_base.main()
TypeIsTest
python
getsentry__sentry
src/sentry/seer/autofix/utils.py
{ "start": 1627, "end": 2035 }
class ____(StrEnum): PENDING = "pending" RUNNING = "running" COMPLETED = "completed" FAILED = "failed" @classmethod def from_cursor_status(cls, cursor_status: str) -> "CodingAgentStatus | None": status_mapping = { "FINISHED": cls.COMPLETED, "ERROR": cls.FAILED, } return status_mapping.get(cursor_status.upper(), None)
CodingAgentStatus
python
walkccc__LeetCode
solutions/714. Best Time to Buy and Sell Stock with Transaction Fee/714.py
{ "start": 0, "end": 232 }
class ____: def maxProfit(self, prices: list[int], fee: int) -> int: sell = 0 hold = -math.inf for price in prices: sell = max(sell, hold + price) hold = max(hold, sell - price - fee) return sell
Solution
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_theme06.py
{ "start": 350, "end": 2146 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_theme06.xlsx") def test_create_file(self): """Test the creation of an XlsxWriter file with chart formatting.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "line", "subtype": "stacked"}) chart.axis_ids = [68411392, 68414848] # Add some test data for the chart(s). for row_num in range(8): for col_num in range(6): worksheet.write_number(row_num, col_num, 1) chart.add_series( { "values": ["Sheet1", 0, 0, 7, 0], "line": {"color": Color((4, 0))}, } ) chart.add_series( { "values": ["Sheet1", 0, 1, 7, 1], "line": {"color": Color((4, 1))}, } ) chart.add_series( { "values": ["Sheet1", 0, 2, 7, 2], "line": {"color": Color((4, 2))}, } ) chart.add_series( { "values": ["Sheet1", 0, 3, 7, 3], "line": {"color": Color((4, 3))}, } ) chart.add_series( { "values": ["Sheet1", 0, 4, 7, 4], "line": {"color": Color((4, 4))}, } ) chart.add_series( { "values": ["Sheet1", 0, 5, 7, 5], "line": {"color": Color((4, 5))}, } ) worksheet.insert_chart(8, 7, chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_image_bytes03.py
{ "start": 339, "end": 996 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("image03.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with image(s).""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() with open(self.image_dir + "red.jpg", "rb") as image_file: image_data = BytesIO(image_file.read()) worksheet.insert_image("E9", "red.jpg", {"image_data": image_data}) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
euske__pdfminer
pdfminer/pdfdocument.py
{ "start": 16113, "end": 26711 }
class ____: """PDFDocument object represents a PDF document. Since a PDF file can be very big, normally it is not loaded at once. So PDF document has to cooperate with a PDF parser in order to dynamically import the data as processing goes. Typical usage: doc = PDFDocument(parser, password) obj = doc.getobj(objid) """ security_handler_registry = { 1: PDFStandardSecurityHandler, 2: PDFStandardSecurityHandler, } if AES is not None: security_handler_registry[4] = PDFStandardSecurityHandlerV4 if SHA256 is not None: security_handler_registry[5] = PDFStandardSecurityHandlerV5 debug = 0 def __init__(self, parser, password=b'', caching=True, fallback=True): "Set the document to use a given PDFParser object." self.caching = caching self.xrefs = [] self.info = [] self.catalog = None self.encryption = None self.decipher = None self._parser = None self._cached_objs = {} self._parsed_objs = {} self._parser = parser self._parser.set_document(self) self.is_printable = self.is_modifiable = self.is_extractable = True # Retrieve the information of each header that was appended # (maybe multiple times) at the end of the document. try: pos = self.find_xref(parser) self.read_xref_from(parser, pos, self.xrefs) except PDFNoValidXRef: fallback = True if fallback: parser.fallback = True xref = PDFXRefFallback() xref.load(parser) self.xrefs.append(xref) for xref in self.xrefs: trailer = xref.get_trailer() if not trailer: continue # If there's an encryption info, remember it. if 'Encrypt' in trailer: #assert not self.encryption self.encryption = (list_value(trailer['ID']), dict_value(trailer['Encrypt'])) self._initialize_password(password) if 'Info' in trailer: self.info.append(dict_value(trailer['Info'])) if 'Root' in trailer: # Every PDF file must have exactly one /Root dictionary. self.catalog = dict_value(trailer['Root']) break else: raise PDFSyntaxError('No /Root object! 
- Is this really a PDF?') if self.catalog.get('Type') is not LITERAL_CATALOG: if STRICT: raise PDFSyntaxError('Catalog not found!') return # _initialize_password(password=b'') # Perform the initialization with a given password. def _initialize_password(self, password=b''): (docid, param) = self.encryption if literal_name(param.get('Filter')) != 'Standard': raise PDFEncryptionError('Unknown filter: param=%r' % param) v = int_value(param.get('V', 0)) factory = self.security_handler_registry.get(v) if factory is None: raise PDFEncryptionError('Unknown algorithm: param=%r' % param) handler = factory(docid, param, password) self.decipher = handler.decrypt self.is_printable = handler.is_printable() self.is_modifiable = handler.is_modifiable() self.is_extractable = handler.is_extractable() self._parser.fallback = False # need to read streams with exact length return def _getobj_objstm(self, stream, index, objid): if stream.objid in self._parsed_objs: (objs, n) = self._parsed_objs[stream.objid] else: (objs, n) = self._get_objects(stream) if self.caching: self._parsed_objs[stream.objid] = (objs, n) i = n*2+index try: obj = objs[i] except IndexError: raise PDFSyntaxError('index too big: %r' % index) return obj def _get_objects(self, stream): if stream.get('Type') is not LITERAL_OBJSTM: if STRICT: raise PDFSyntaxError('Not a stream object: %r' % stream) try: n = stream['N'] except KeyError: if STRICT: raise PDFSyntaxError('N is not defined: %r' % stream) n = 0 parser = PDFStreamParser(stream.get_data()) parser.set_document(self) objs = [] try: while 1: (_, obj) = parser.nextobject() objs.append(obj) except PSEOF: pass return (objs, n) KEYWORD_OBJ = KWD(b'obj') def _getobj_parse(self, pos, objid): self._parser.seek(pos) (_, objid1) = self._parser.nexttoken() # objid if objid1 != objid: raise PDFSyntaxError('objid mismatch: %r=%r' % (objid1, objid)) (_, genno) = self._parser.nexttoken() # genno (_, kwd) = self._parser.nexttoken() if kwd is not self.KEYWORD_OBJ: raise 
PDFSyntaxError('Invalid object spec: offset=%r' % pos) (_, obj) = self._parser.nextobject() return obj # can raise PDFObjectNotFound def getobj(self, objid): assert objid != 0 if not self.xrefs: raise PDFException('PDFDocument is not initialized') if self.debug: logging.debug('getobj: objid=%r' % objid) if objid in self._cached_objs: (obj, genno) = self._cached_objs[objid] else: for xref in self.xrefs: try: (strmid, index, genno) = xref.get_pos(objid) except KeyError: continue try: if strmid is not None: stream = stream_value(self.getobj(strmid)) obj = self._getobj_objstm(stream, index, objid) else: obj = self._getobj_parse(index, objid) if self.decipher: obj = decipher_all(self.decipher, objid, genno, obj) if isinstance(obj, PDFStream): obj.set_objid(objid, genno) break except (PSEOF, PDFSyntaxError): continue else: raise PDFObjectNotFound(objid) if self.debug: logging.debug('register: objid=%r: %r' % (objid, obj)) if self.caching: self._cached_objs[objid] = (obj, genno) return obj def get_outlines(self): if 'Outlines' not in self.catalog: raise PDFNoOutlines def search(entry, level): entry = dict_value(entry) if 'Title' in entry: if 'A' in entry or 'Dest' in entry: title = decode_text(bytes_value(entry['Title'])) dest = entry.get('Dest') action = entry.get('A') se = entry.get('SE') yield (level, title, dest, action, se) if 'First' in entry and 'Last' in entry: for x in search(entry['First'], level+1): yield x if 'Next' in entry: for x in search(entry['Next'], level): yield x return return search(self.catalog['Outlines'], 0) def lookup_name(self, cat, key): try: names = dict_value(self.catalog['Names']) except (PDFTypeError, KeyError): raise KeyError((cat, key)) # may raise KeyError d0 = dict_value(names[cat]) def lookup(d): if 'Limits' in d: (k1, k2) = list_value(d['Limits']) if key < k1 or k2 < key: return None if 'Names' in d: objs = list_value(d['Names']) names = dict(choplist(2, objs)) return names[key] if 'Kids' in d: for c in list_value(d['Kids']): v = 
lookup(dict_value(c)) if v: return v raise KeyError((cat, key)) return lookup(d0) def get_dest(self, name): try: # PDF-1.2 or later obj = self.lookup_name('Dests', name) except KeyError: # PDF-1.1 or prior if 'Dests' not in self.catalog: raise PDFDestinationNotFound(name) d0 = dict_value(self.catalog['Dests']) if name not in d0: raise PDFDestinationNotFound(name) obj = d0[name] return obj # find_xref def find_xref(self, parser): """Internal function used to locate the first XRef.""" # search the last xref table by scanning the file backwards. prev = None for line in parser.revreadlines(): line = line.strip() if self.debug: logging.debug('find_xref: %r' % line) if line == b'startxref': break if line: prev = line else: raise PDFNoValidXRef('Unexpected EOF') if self.debug: logging.info('xref found: pos=%r' % prev) return int(prev) # read xref table def read_xref_from(self, parser, start, xrefs): """Reads XRefs from the given location.""" parser.seek(start) parser.reset() try: (pos, token) = parser.nexttoken() except PSEOF: raise PDFNoValidXRef('Unexpected EOF') if self.debug: logging.info('read_xref_from: start=%d, token=%r' % (start, token)) if isinstance(token, int): # XRefStream: PDF-1.5 parser.seek(pos) parser.reset() xref = PDFXRefStream() xref.load(parser) else: if token is parser.KEYWORD_XREF: parser.nextline() xref = PDFXRef() xref.load(parser) xrefs.append(xref) trailer = xref.get_trailer() if self.debug: logging.info('trailer: %r' % trailer) if 'XRefStm' in trailer: pos = int_value(trailer['XRefStm']) self.read_xref_from(parser, pos, xrefs) if 'Prev' in trailer: # find previous xref pos = int_value(trailer['Prev']) self.read_xref_from(parser, pos, xrefs) return
PDFDocument
python
networkx__networkx
networkx/classes/tests/test_digraph.py
{ "start": 11231, "end": 12283 }
class ____(_TestGraphEdgeSubgraph): """Unit tests for the :meth:`DiGraph.edge_subgraph` method.""" def setup_method(self): # Create a doubly-linked path graph on five nodes. G = nx.DiGraph(nx.path_graph(5)) # Add some node, edge, and graph attributes. for i in range(5): G.nodes[i]["name"] = f"node{i}" G.edges[0, 1]["name"] = "edge01" G.edges[3, 4]["name"] = "edge34" G.graph["name"] = "graph" # Get the subgraph induced by the first and last edges. self.G = G self.H = G.edge_subgraph([(0, 1), (3, 4)]) def test_pred_succ(self): """Test that nodes are added to predecessors and successors. For more information, see GitHub issue #2370. """ G = nx.DiGraph() G.add_edge(0, 1) H = G.edge_subgraph([(0, 1)]) assert list(H.predecessors(0)) == [] assert list(H.successors(0)) == [1] assert list(H.predecessors(1)) == [0] assert list(H.successors(1)) == []
TestEdgeSubgraph
python
ansible__ansible
test/integration/targets/ansible-doc/broken-docs/collections/ansible_collections/testns/testcol/plugins/vars/noop_vars_plugin.py
{ "start": 614, "end": 831 }
class ____(BaseVarsPlugin): def get_vars(self, loader, path, entities, cache=True): super(VarsModule, self).get_vars(loader, path, entities) return {'collection': 'yes', 'notreal': 'value'}
VarsModule
python
google__pytype
pytype/tools/xref/indexer.py
{ "start": 39155, "end": 39234 }
class ____(Exception): """Wrap exceptions raised by the indexer."""
PytypeError
python
google__jax
jaxlib/pytree_test.py
{ "start": 1313, "end": 1404 }
class ____: a: int b: str registry.register_dataclass_node(Custom, ["a"], ["b"])
Custom
python
doocs__leetcode
solution/1700-1799/1751.Maximum Number of Events That Can Be Attended II/Solution2.py
{ "start": 0, "end": 440 }
class ____: def maxValue(self, events: List[List[int]], k: int) -> int: events.sort(key=lambda x: x[1]) n = len(events) f = [[0] * (k + 1) for _ in range(n + 1)] for i, (st, _, val) in enumerate(events, 1): p = bisect_left(events, st, hi=i - 1, key=lambda x: x[1]) for j in range(1, k + 1): f[i][j] = max(f[i - 1][j], f[p][j - 1] + val) return f[n][k]
Solution
python
great-expectations__great_expectations
great_expectations/compatibility/snowflake.py
{ "start": 3730, "end": 4217 }
class ____: """Namespace for Snowflake dialect types.""" ARRAY = ARRAY BYTEINT = BYTEINT CHARACTER = CHARACTER DEC = DEC DOUBLE = DOUBLE FIXED = FIXED GEOGRAPHY = GEOGRAPHY GEOMETRY = GEOMETRY NUMBER = NUMBER OBJECT = OBJECT STRING = STRING TEXT = TEXT TIMESTAMP_LTZ = TIMESTAMP_LTZ TIMESTAMP_NTZ = TIMESTAMP_NTZ TIMESTAMP_TZ = TIMESTAMP_TZ TINYINT = TINYINT VARBINARY = VARBINARY VARIANT = VARIANT
SNOWFLAKE_TYPES
python
agronholm__apscheduler
src/apscheduler/_events.py
{ "start": 3895, "end": 4265 }
class ____(DataStoreEvent): """ Signals that the deserialization of a schedule has failed. :ivar schedule_id: ID of the schedule that failed to deserialize :ivar exception: the exception that was raised during deserialization """ schedule_id: str exception: BaseException @attrs.define(kw_only=True, frozen=True)
ScheduleDeserializationFailed
python
pytorch__pytorch
benchmarks/dynamo/pr_time_benchmarks/benchmarks/dtensor.py
{ "start": 192, "end": 1110 }
class ____(BenchmarkBase): def __init__(self, operator, world_size) -> None: super().__init__( category=f"dtensor_dispatch_{operator}", device="cuda", ) self.world_size = world_size def name(self) -> str: prefix = f"{self.category()}" return prefix def description(self) -> str: return f"DTensor dispatch time for {self.category()}" def _prepare_once(self) -> None: self.mesh = torch.distributed.device_mesh.init_device_mesh( "cuda", (self.world_size,), mesh_dim_names=("dp",) ) self.a = DTensor.from_local( torch.ones(10, 10, device=self.device()), self.mesh, [Replicate()] ) self.b = DTensor.from_local( torch.ones(10, 10, device=self.device()), self.mesh, [Replicate()] ) def _prepare(self) -> None: pass
BenchmarkDTensorDispatch
python
walkccc__LeetCode
solutions/1960. Maximum Product of the Length of Two Palindromic Substrings/1960-2.py
{ "start": 0, "end": 1939 }
class ____: def maxProduct(self, s: str) -> int: BASE = 26 HASH = 1_000_000_007 n = len(s) pows = [1] # pows[i] := BASE^i % HASH # hashL[i] = the hash of the first i letters of s, where hashL[i] = # (26^(i - 1) * s[0] + 26^(i - 2) * s[1] + ... + s[i - 1]) % HASH hashL = [0] # hashR[i] = the hash of the last i letters of s, where hashR[i] = # (26^(i - 1) * s[-1] + 26^(i - 2) * s[-2] + ... + s[-i]) % HASH hashR = [0] # maxLeft[i] := the maximum odd length of palindromes in s[0..i] maxLeft = [0] * n # maxRight[i] := the maximum odd length of palindromes in s[i..n - 1] maxRight = [0] * n def val(c: str) -> int: return ord(c) - ord('a') for _ in range(n): pows.append(pows[-1] * BASE % HASH) for c in s: hashL.append((hashL[-1] * BASE + val(c)) % HASH) for c in reversed(s): hashR.append((hashR[-1] * BASE + val(c)) % HASH) hashR.reverse() def getLeftRollingHash(l: int, r: int) -> int: """Returns the left rolling hash of s[l..r).""" h = (hashL[r] - hashL[l] * pows[r - l]) % HASH return h + HASH if h < 0 else h def getRightRollingHash(l: int, r: int) -> int: """Returns the right rolling hash of s[l..r).""" h = (hashR[l] - hashR[r] * pows[r - l]) % HASH return h + HASH if h < 0 else h def isPalindrome(l: int, r: int) -> bool: """Returns True if s[l..r) is a palindrome.""" return getLeftRollingHash(l, r) == getRightRollingHash(l, r) maxLength = 1 for r in range(n): l = (r - maxLength - 2) + 1 if l >= 0 and isPalindrome(l, r + 1): maxLength += 2 maxLeft[r] = maxLength maxLength = 1 for l in reversed(range(n)): r = (l + maxLength + 2) - 1 if r < n and isPalindrome(l, r + 1): maxLength += 2 maxRight[l] = maxLength return max(maxLeft[i - 1] * maxRight[i] for i in range(1, n))
Solution
python
sqlalchemy__sqlalchemy
test/orm/inheritance/test_poly_loading.py
{ "start": 16314, "end": 38388 }
class ____(GeometryFixtureBase): def test_threelevel_selectin_to_inline_mapped(self): self._fixture_from_geometry( { "a": { "subclasses": { "b": {"polymorphic_load": "selectin"}, "c": { "subclasses": { "d": { "polymorphic_load": "inline", "single": True, }, "e": { "polymorphic_load": "inline", "single": True, }, }, "polymorphic_load": "selectin", }, } } } ) a, b, c, d, e = self.classes("a", "b", "c", "d", "e") sess = fixture_session() sess.add_all([d(d_data="d1"), e(e_data="e1")]) sess.commit() q = sess.query(a) result = self.assert_sql_execution( testing.db, q.all, CompiledSQL( "SELECT a.id AS a_id, a.type AS a_type, " "a.a_data AS a_a_data FROM a", {}, ), Or( CompiledSQL( "SELECT c.id AS c_id, a.id AS a_id, a.type AS a_type, " "c.c_data AS c_c_data, c.e_data AS c_e_data, " "c.d_data AS c_d_data " "FROM a JOIN c ON a.id = c.id " "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY a.id", [{"primary_keys": [1, 2]}], ), CompiledSQL( "SELECT c.id AS c_id, a.id AS a_id, a.type AS a_type, " "c.c_data AS c_c_data, " "c.d_data AS c_d_data, c.e_data AS c_e_data " "FROM a JOIN c ON a.id = c.id " "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY a.id", [{"primary_keys": [1, 2]}], ), ), ) with self.assert_statement_count(testing.db, 0): eq_(result, [d(d_data="d1"), e(e_data="e1")]) @testing.fixture def threelevel_all_selectin_fixture(self): self._fixture_from_geometry( { "a": { "subclasses": { "b": {"polymorphic_load": "selectin"}, "c": { "subclasses": { "d": { "polymorphic_load": "selectin", }, "e": { "polymorphic_load": "selectin", }, "f": {}, }, "polymorphic_load": "selectin", }, } } } ) def test_threelevel_all_selectin_l1_load_l3( self, threelevel_all_selectin_fixture ): """test for #9373 - load base to receive level 3 endpoints""" a, b, c, d, e = self.classes("a", "b", "c", "d", "e") sess = fixture_session() sess.add_all( [d(c_data="cd1", d_data="d1"), e(c_data="ce1", e_data="e1")] ) sess.commit() for i in range(3): sess.close() q = sess.query(a) result = 
self.assert_sql_execution( testing.db, q.all, CompiledSQL( "SELECT a.id AS a_id, a.type AS a_type, " "a.a_data AS a_a_data FROM a", {}, ), CompiledSQL( "SELECT d.id AS d_id, c.id AS c_id, a.id AS a_id, " "a.type AS a_type, c.c_data AS c_c_data, " "d.d_data AS d_d_data " "FROM a JOIN c ON a.id = c.id JOIN d ON c.id = d.id " "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY a.id", [{"primary_keys": [1]}], ), CompiledSQL( "SELECT e.id AS e_id, c.id AS c_id, a.id AS a_id, " "a.type AS a_type, c.c_data AS c_c_data, " "e.e_data AS e_e_data " "FROM a JOIN c ON a.id = c.id JOIN e ON c.id = e.id " "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY a.id", [{"primary_keys": [2]}], ), ) with self.assert_statement_count(testing.db, 0): eq_( result, [ d(c_data="cd1", d_data="d1"), e(c_data="ce1", e_data="e1"), ], ) def test_threelevel_partial_selectin_l1_load_l3( self, threelevel_all_selectin_fixture ): """test for #9373 - load base to receive level 3 endpoints""" a, b, c, d, f = self.classes("a", "b", "c", "d", "f") sess = fixture_session() sess.add_all( [d(c_data="cd1", d_data="d1"), f(c_data="ce1", f_data="e1")] ) sess.commit() for i in range(3): sess.close() q = sess.query(a) result = self.assert_sql_execution( testing.db, q.all, CompiledSQL( "SELECT a.id AS a_id, a.type AS a_type, " "a.a_data AS a_a_data FROM a", {}, ), CompiledSQL( "SELECT d.id AS d_id, c.id AS c_id, a.id AS a_id, " "a.type AS a_type, c.c_data AS c_c_data, " "d.d_data AS d_d_data " "FROM a JOIN c ON a.id = c.id JOIN d ON c.id = d.id " "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY a.id", [{"primary_keys": [1]}], ), # only loads pk 2 - this is the filtering inside of do_load CompiledSQL( "SELECT c.id AS c_id, a.id AS a_id, a.type AS a_type, " "c.c_data AS c_c_data " "FROM a JOIN c ON a.id = c.id " "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY a.id", [{"primary_keys": [2]}], ), # no more SQL; if we hit pk 1 again, it would re-do the d here ) with 
self.sql_execution_asserter(testing.db) as asserter_: eq_( result, [ d(c_data="cd1", d_data="d1"), f(c_data="ce1", f_data="e1"), ], ) # f was told not to load its attrs, so they load here asserter_.assert_( CompiledSQL( "SELECT f.f_data AS f_f_data FROM f WHERE :param_1 = f.id", [{"param_1": 2}], ), ) def test_threelevel_all_selectin_l1_load_l2( self, threelevel_all_selectin_fixture ): """test for #9373 - load base to receive level 2 endpoint""" a, b, c, d, e = self.classes("a", "b", "c", "d", "e") sess = fixture_session() sess.add_all([c(c_data="c1", a_data="a1")]) sess.commit() q = sess.query(a) result = self.assert_sql_execution( testing.db, q.all, CompiledSQL( "SELECT a.id AS a_id, a.type AS a_type, " "a.a_data AS a_a_data FROM a", {}, ), CompiledSQL( "SELECT c.id AS c_id, a.id AS a_id, a.type AS a_type, " "c.c_data AS c_c_data FROM a JOIN c ON a.id = c.id " "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) ORDER BY a.id", {"primary_keys": [1]}, ), ) with self.assert_statement_count(testing.db, 0): eq_( result, [c(c_data="c1", a_data="a1")], ) @testing.variation("use_aliased_class", [True, False]) def test_threelevel_all_selectin_l2_load_l3( self, threelevel_all_selectin_fixture, use_aliased_class ): """test for #9373 - load level 2 endpoing to receive level 3 endpoints""" a, b, c, d, e = self.classes("a", "b", "c", "d", "e") sess = fixture_session() sess.add_all( [d(c_data="cd1", d_data="d1"), e(c_data="ce1", e_data="e1")] ) sess.commit() if use_aliased_class: q = sess.query(aliased(c, flat=True)) else: q = sess.query(c) result = self.assert_sql_execution( testing.db, q.all, Conditional( bool(use_aliased_class), [ CompiledSQL( "SELECT c_1.id AS c_1_id, a_1.id AS a_1_id, " "a_1.type AS a_1_type, a_1.a_data AS a_1_a_data, " "c_1.c_data AS c_1_c_data " "FROM a AS a_1 JOIN c AS c_1 ON a_1.id = c_1.id", {}, ) ], [ CompiledSQL( "SELECT c.id AS c_id, a.id AS a_id, a.type AS a_type, " "a.a_data AS a_a_data, c.c_data AS c_c_data " "FROM a JOIN c ON a.id = c.id", {}, ) ], 
), CompiledSQL( "SELECT d.id AS d_id, c.id AS c_id, a.id AS a_id, " "a.type AS a_type, d.d_data AS d_d_data " "FROM a JOIN c ON a.id = c.id JOIN d ON c.id = d.id " "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) ORDER BY a.id", [{"primary_keys": [1]}], ), CompiledSQL( "SELECT e.id AS e_id, c.id AS c_id, a.id AS a_id, " "a.type AS a_type, e.e_data AS e_e_data " "FROM a JOIN c ON a.id = c.id JOIN e ON c.id = e.id " "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) ORDER BY a.id", [{"primary_keys": [2]}], ), ) with self.assert_statement_count(testing.db, 0): eq_( result, [d(c_data="cd1", d_data="d1"), e(c_data="ce1", e_data="e1")], ) def test_threelevel_selectin_to_inline_options(self): self._fixture_from_geometry( { "a": { "subclasses": { "b": {}, "c": { "subclasses": { "d": {"single": True}, "e": {"single": True}, } }, } } } ) a, b, c, d, e = self.classes("a", "b", "c", "d", "e") sess = fixture_session() sess.add_all([d(d_data="d1"), e(e_data="e1")]) sess.commit() c_alias = with_polymorphic(c, (d, e)) q = sess.query(a).options(selectin_polymorphic(a, [b, c_alias])) result = self.assert_sql_execution( testing.db, q.all, CompiledSQL( "SELECT a.id AS a_id, a.type AS a_type, " "a.a_data AS a_a_data FROM a", {}, ), Or( CompiledSQL( "SELECT a.id AS a_id, a.type AS a_type, c.id AS c_id, " "c.c_data AS c_c_data, c.e_data AS c_e_data, " "c.d_data AS c_d_data " "FROM a JOIN c ON a.id = c.id " "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY a.id", [{"primary_keys": [1, 2]}], ), CompiledSQL( "SELECT c.id AS c_id, a.id AS a_id, a.type AS a_type, " "c.c_data AS c_c_data, c.d_data AS c_d_data, " "c.e_data AS c_e_data " "FROM a JOIN c ON a.id = c.id " "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY a.id", [{"primary_keys": [1, 2]}], ), ), ) with self.assert_statement_count(testing.db, 0): eq_(result, [d(d_data="d1"), e(e_data="e1")]) @testing.variation("include_intermediary_row", [True, False]) def test_threelevel_load_only_3lev(self, include_intermediary_row): 
"""test issue #11327""" self._fixture_from_geometry( { "a": { "subclasses": { "b": {"subclasses": {"c": {}}}, } } } ) a, b, c = self.classes("a", "b", "c") sess = fixture_session() sess.add(c(a_data="a1", b_data="b1", c_data="c1")) if include_intermediary_row: sess.add(b(a_data="a1", b_data="b1")) sess.commit() sess = fixture_session() pks = [] c_pks = [] with self.sql_execution_asserter(testing.db) as asserter: for obj in sess.scalars( select(a) .options(selectin_polymorphic(a, classes=[b, c])) .order_by(a.id) ): assert "b_data" in obj.__dict__ if isinstance(obj, c): assert "c_data" in obj.__dict__ c_pks.append(obj.id) pks.append(obj.id) asserter.assert_( CompiledSQL( "SELECT a.id, a.type, a.a_data FROM a ORDER BY a.id", {} ), AllOf( CompiledSQL( "SELECT c.id AS c_id, b.id AS b_id, a.id AS a_id, " "a.type AS a_type, c.c_data AS c_c_data FROM a JOIN b " "ON a.id = b.id JOIN c ON b.id = c.id WHERE a.id IN " "(__[POSTCOMPILE_primary_keys]) ORDER BY a.id", [{"primary_keys": c_pks}], ), CompiledSQL( "SELECT b.id AS b_id, a.id AS a_id, a.type AS a_type, " "b.b_data AS b_b_data FROM a JOIN b ON a.id = b.id " "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY a.id", [{"primary_keys": pks}], ), ), ) @testing.combinations((True,), (False,)) def test_threelevel_selectin_to_inline_awkward_alias_options( self, use_aliased_class ): self._fixture_from_geometry( { "a": { "subclasses": { "b": {}, "c": {"subclasses": {"d": {}, "e": {}}}, } } } ) a, b, c, d, e = self.classes("a", "b", "c", "d", "e") sess = fixture_session() sess.add_all( [d(c_data="c1", d_data="d1"), e(c_data="c2", e_data="e1")] ) sess.commit() from sqlalchemy import select a_table, c_table, d_table, e_table = self.tables("a", "c", "d", "e") poly = ( select(a_table.c.id, a_table.c.type, c_table, d_table, e_table) .select_from( a_table.join(c_table).outerjoin(d_table).outerjoin(e_table) ) .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) .alias("poly") ) c_alias = with_polymorphic(c, (d, e), poly) if 
use_aliased_class: opt = selectin_polymorphic(a, [b, c_alias]) else: opt = selectin_polymorphic( a, [b, c_alias, d, e], ) q = sess.query(a).options(opt).order_by(a.id) if use_aliased_class: result = self.assert_sql_execution( testing.db, q.all, CompiledSQL( "SELECT a.id AS a_id, a.type AS a_type, " "a.a_data AS a_a_data FROM a ORDER BY a.id", {}, ), Or( # here, the test is that the adaptation of "a" takes place CompiledSQL( "SELECT poly.c_id AS poly_c_id, " "poly.a_type AS poly_a_type, " "poly.a_id AS poly_a_id, poly.c_c_data " "AS poly_c_c_data, " "poly.e_id AS poly_e_id, poly.e_e_data " "AS poly_e_e_data, " "poly.d_id AS poly_d_id, poly.d_d_data " "AS poly_d_d_data " "FROM (SELECT a.id AS a_id, a.type AS a_type, " "c.id AS c_id, " "c.c_data AS c_c_data, d.id AS d_id, " "d.d_data AS d_d_data, " "e.id AS e_id, e.e_data AS e_e_data FROM a JOIN c " "ON a.id = c.id LEFT OUTER JOIN d ON c.id = d.id " "LEFT OUTER JOIN e ON c.id = e.id) AS poly " "WHERE poly.a_id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY poly.a_id", [{"primary_keys": [1, 2]}], ), CompiledSQL( "SELECT poly.c_id AS poly_c_id, " "poly.a_id AS poly_a_id, poly.a_type AS poly_a_type, " "poly.c_c_data AS poly_c_c_data, " "poly.d_id AS poly_d_id, poly.d_d_data " "AS poly_d_d_data, " "poly.e_id AS poly_e_id, poly.e_e_data " "AS poly_e_e_data " "FROM (SELECT a.id AS a_id, a.type AS a_type, " "c.id AS c_id, c.c_data AS c_c_data, d.id AS d_id, " "d.d_data AS d_d_data, e.id AS e_id, " "e.e_data AS e_e_data FROM a JOIN c ON a.id = c.id " "LEFT OUTER JOIN d ON c.id = d.id " "LEFT OUTER JOIN e ON c.id = e.id) AS poly " "WHERE poly.a_id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY poly.a_id", [{"primary_keys": [1, 2]}], ), ), ) else: result = self.assert_sql_execution( testing.db, q.all, CompiledSQL( "SELECT a.id AS a_id, a.type AS a_type, " "a.a_data AS a_a_data FROM a ORDER BY a.id", {}, ), AllOf( # note this query is added due to the fix made in # #11327 CompiledSQL( "SELECT c.id AS c_id, a.id AS a_id, a.type AS 
a_type, " "c.c_data AS c_c_data FROM a JOIN c ON a.id = c.id " "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY a.id", [{"primary_keys": [1, 2]}], ), CompiledSQL( "SELECT d.id AS d_id, c.id AS c_id, a.id AS a_id, " "a.type AS a_type, d.d_data AS d_d_data FROM a " "JOIN c ON a.id = c.id JOIN d ON c.id = d.id " "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY a.id", [{"primary_keys": [1]}], ), CompiledSQL( "SELECT e.id AS e_id, c.id AS c_id, a.id AS a_id, " "a.type AS a_type, e.e_data AS e_e_data FROM a " "JOIN c ON a.id = c.id JOIN e ON c.id = e.id " "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY a.id", [{"primary_keys": [2]}], ), ), ) with self.assert_statement_count(testing.db, 0): eq_( result, [d(c_data="c1", d_data="d1"), e(c_data="c2", e_data="e1")], ) def test_partial_load_no_invoke_eagers(self): # test issue #4199 self._fixture_from_geometry( { "a": { "subclasses": { "a1": {"polymorphic_load": "selectin"}, "a2": {"polymorphic_load": "selectin"}, } } } ) a, a1, a2 = self.classes("a", "a1", "a2") sess = fixture_session() a1_obj = a1() a2_obj = a2() sess.add_all([a1_obj, a2_obj]) del a2_obj sess.flush() sess.expire_all() # _with_invoke_all_eagers(False), used by the lazy loader # strategy, will cause one less state to be present such that # the poly loader won't locate a state limited to the "a1" mapper, # needs to test that it has states sess.query(a)._with_invoke_all_eagers(False).all()
TestGeometries
python
getsentry__sentry
src/sentry/api/endpoints/project_filter_details.py
{ "start": 815, "end": 3945 }
class ____(ProjectEndpoint): publish_status = { "PUT": ApiPublishStatus.PUBLIC, } @extend_schema( operation_id="Update an Inbound Data Filter", parameters=[ GlobalParams.ORG_ID_OR_SLUG, GlobalParams.PROJECT_ID_OR_SLUG, ProjectParams.FILTER_ID, ], request=_LegacyBrowserFilterSerializer, responses={ 204: RESPONSE_NO_CONTENT, 400: RESPONSE_BAD_REQUEST, 403: RESPONSE_FORBIDDEN, 404: RESPONSE_NOT_FOUND, }, examples=None, ) def put(self, request: Request, project, filter_id) -> Response: """ Update various inbound data filters for a project. """ for flt in inbound_filters.get_all_filter_specs(): if flt.id == filter_id: current_filter = flt break else: raise ResourceDoesNotExist # could not find filter with the requested id serializer = current_filter.serializer_cls(data=request.data, partial=True) if not serializer.is_valid(): return Response(serializer.errors, status=400) current_state = inbound_filters.get_filter_state(filter_id, project) if isinstance(current_state, list): current_state = set(current_state) new_state = inbound_filters.set_filter_state(filter_id, project, serializer.validated_data) if isinstance(new_state, list): new_state = set(new_state) audit_log_state = audit_log.get_event_id("PROJECT_ENABLE") returned_state = None if filter_id == "legacy-browsers": if isinstance(current_state, bool) or isinstance(new_state, bool): returned_state = new_state if not new_state: audit_log_state = audit_log.get_event_id("PROJECT_DISABLE") elif current_state - new_state: returned_state = current_state - new_state audit_log_state = audit_log.get_event_id("PROJECT_DISABLE") elif new_state - current_state: returned_state = new_state - current_state elif new_state == current_state: returned_state = new_state if filter_id in ( FilterStatKeys.BROWSER_EXTENSION, FilterStatKeys.LOCALHOST, FilterStatKeys.WEB_CRAWLER, FilterStatKeys.HEALTH_CHECK, ): returned_state = filter_id removed = current_state - new_state if removed == 1: audit_log_state = 
audit_log.get_event_id("PROJECT_DISABLE") if isinstance(returned_state, Iterable) and not isinstance(returned_state, str): returned_state = list(returned_state) self.create_audit_entry( request=request, organization=project.organization, target_object=project.id, event=audit_log_state, data={"state": returned_state, "slug": project.slug}, ) return Response(status=204)
ProjectFilterDetailsEndpoint
python
astropy__astropy
astropy/units/tests/test_quantity_ufuncs.py
{ "start": 26116, "end": 29432 }
class ____: @pytest.mark.parametrize( "ufunc", [ np.absolute, np.fabs, np.conj, np.conjugate, np.negative, np.spacing, np.rint, np.floor, np.ceil, np.positive, ], ) def test_invariant_scalar(self, ufunc): q_i = 4.7 * u.m q_o = ufunc(q_i) assert isinstance(q_o, u.Quantity) assert q_o.unit == q_i.unit assert q_o.value == ufunc(q_i.value) @pytest.mark.parametrize( "ufunc", [np.absolute, np.conjugate, np.negative, np.rint, np.floor, np.ceil] ) def test_invariant_array(self, ufunc): q_i = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s q_o = ufunc(q_i) assert isinstance(q_o, u.Quantity) assert q_o.unit == q_i.unit assert np.all(q_o.value == ufunc(q_i.value)) @pytest.mark.parametrize( "ufunc", [ np.add, np.subtract, np.hypot, np.maximum, np.minimum, np.nextafter, np.remainder, np.mod, np.fmod, ], ) def test_invariant_twoarg_scalar(self, ufunc): q_i1 = 4.7 * u.m q_i2 = 9.4 * u.km q_o = ufunc(q_i1, q_i2) assert isinstance(q_o, u.Quantity) assert q_o.unit == q_i1.unit assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit))) @pytest.mark.parametrize( "ufunc", [ np.add, np.subtract, np.hypot, np.maximum, np.minimum, np.nextafter, np.remainder, np.mod, np.fmod, ], ) def test_invariant_twoarg_array(self, ufunc): q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.us q_o = ufunc(q_i1, q_i2) assert isinstance(q_o, u.Quantity) assert q_o.unit == q_i1.unit assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit))) @pytest.mark.parametrize( ("ufunc", "arbitrary"), [ (np.add, 0.0), (np.subtract, 0.0), (np.hypot, 0.0), (np.maximum, 0.0), (np.minimum, 0.0), (np.nextafter, 0.0), (np.remainder, np.inf), (np.mod, np.inf), (np.fmod, np.inf), ], ) def test_invariant_twoarg_one_arbitrary(self, ufunc, arbitrary): q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s q_o = ufunc(q_i1, arbitrary) assert isinstance(q_o, u.Quantity) assert q_o.unit == q_i1.unit assert_allclose(q_o.value, ufunc(q_i1.value, arbitrary)) 
@pytest.mark.parametrize( "ufunc", [ np.add, np.subtract, np.hypot, np.maximum, np.minimum, np.nextafter, np.remainder, np.mod, np.fmod, ], ) def test_invariant_twoarg_invalid_units(self, ufunc): q_i1 = 4.7 * u.m q_i2 = 9.4 * u.s with pytest.raises(u.UnitsError, match="compatible dimensions"): ufunc(q_i1, q_i2)
TestInvariantUfuncs
python
pallets__jinja
tests/test_filters.py
{ "start": 418, "end": 604 }
class ____: def __init__(self, value1, value2): self.value1 = value1 self.value2 = value2 def __str__(self): return f"({self.value1},{self.value2})"
Magic2
python
ijl__orjson
test/test_enum.py
{ "start": 773, "end": 2646 }
class ____: def test_cannot_subclass(self): """ enum.Enum cannot be subclassed obj->ob_type->ob_base will always be enum.EnumMeta """ with pytest.raises(TypeError): class Subclass(StrEnum): # type: ignore B = "b" def test_arbitrary_enum(self): assert orjson.dumps(UnspecifiedEnum.A) == b'"a"' assert orjson.dumps(UnspecifiedEnum.B) == b"1" assert orjson.dumps(UnspecifiedEnum.C) == b"1.1" assert orjson.dumps(UnspecifiedEnum.D) == b'{"d":1}' def test_custom_enum(self): assert orjson.dumps(UnspecifiedEnum.E, default=default) == b'"c"' def test_enum_options(self): assert ( orjson.dumps(UnspecifiedEnum.F, option=orjson.OPT_NAIVE_UTC) == b'"1970-01-01T00:00:00+00:00"' ) def test_int_enum(self): assert orjson.dumps(IntEnum.ONE) == b"1" def test_intenum_enum(self): assert orjson.dumps(IntEnumEnum.ONE) == b"1" def test_intflag_enum(self): assert orjson.dumps(IntFlagEnum.ONE) == b"1" def test_flag_enum(self): assert orjson.dumps(FlagEnum.ONE) == b"1" def test_auto_enum(self): assert orjson.dumps(AutoEnum.A) == b'"a"' def test_float_enum(self): assert orjson.dumps(FloatEnum.ONE) == b"1.1" def test_str_enum(self): assert orjson.dumps(StrEnum.AAA) == b'"aaa"' def test_bool_enum(self): with pytest.raises(TypeError): class BoolEnum(bool, enum.Enum): # type: ignore TRUE = True def test_non_str_keys_enum(self): assert ( orjson.dumps({StrEnum.AAA: 1}, option=orjson.OPT_NON_STR_KEYS) == b'{"aaa":1}' ) assert ( orjson.dumps({IntEnum.ONE: 1}, option=orjson.OPT_NON_STR_KEYS) == b'{"1":1}' )
TestEnum
python
encode__django-rest-framework
tests/test_response.py
{ "start": 2302, "end": 2461 }
class ____(APIView): renderer_classes = (BrowsableAPIRenderer, JSONRenderer) def get(self, request, **kwargs): return Response('text')
HTMLView1
python
geekcomputers__Python
Sorting Algorithms/Sorting_List.py
{ "start": 94, "end": 1355 }
class ____: def __init__(self): self.head = None def Insert_At_End(self, new_data): new_node = Node(new_data) if self.head is None: self.head = new_node return current = self.head while current.next: current = current.next current.next = new_node def Sort(self): temp = self.head while temp: minn = temp after = temp.next while after: if minn.data > after.data: minn = after after = after.next key = temp.data temp.data = minn.data minn.data = key temp = temp.next def Display(self): temp = self.head while temp: print(temp.data, "->", end=" ") temp = temp.next print("None") if __name__ == "__main__": L_list = Linked_List() L_list.Insert_At_End(8) L_list.Insert_At_End(5) L_list.Insert_At_End(10) L_list.Insert_At_End(7) L_list.Insert_At_End(6) L_list.Insert_At_End(11) L_list.Insert_At_End(9) print("Linked List: ") L_list.Display() print("Sorted Linked List: ") L_list.Sort() L_list.Display()
Linked_List
python
kamyu104__LeetCode-Solutions
Python/construct-quad-tree.py
{ "start": 29, "end": 324 }
class ____(object): def __init__(self, val, isLeaf, topLeft, topRight, bottomLeft, bottomRight): self.val = val self.isLeaf = isLeaf self.topLeft = topLeft self.topRight = topRight self.bottomLeft = bottomLeft self.bottomRight = bottomRight
Node
python
ethereum__web3.py
web3/eth/base_eth.py
{ "start": 628, "end": 6737 }
class ____(Module): _default_account: ChecksumAddress | Empty = empty _default_block: BlockIdentifier = "latest" _default_contract_factory: Any = None _gas_price_strategy = None is_async = False account = Account() def namereg(self) -> NoReturn: raise NotImplementedError() def icap_namereg(self) -> NoReturn: raise NotImplementedError() @property def default_block(self) -> BlockIdentifier: return self._default_block @default_block.setter def default_block(self, value: BlockIdentifier) -> None: self._default_block = value @property def default_account(self) -> ChecksumAddress | Empty: return self._default_account @default_account.setter def default_account(self, account: ChecksumAddress | Empty) -> None: self._default_account = account def send_transaction_munger(self, transaction: TxParams) -> tuple[TxParams]: if "from" not in transaction and is_checksum_address(self.default_account): transaction = assoc(transaction, "from", self.default_account) return (transaction,) def generate_gas_price( self, transaction_params: TxParams | None = None ) -> Wei | None: if self._gas_price_strategy: return self._gas_price_strategy(self.w3, transaction_params) return None def set_gas_price_strategy( self, gas_price_strategy: GasPriceStrategy | None ) -> None: self._gas_price_strategy = gas_price_strategy def _eth_call_and_estimate_gas_munger( self, transaction: TxParams, block_identifier: BlockIdentifier | None = None, state_override: StateOverride | None = None, ) -> ( tuple[TxParams, BlockIdentifier] | tuple[TxParams, BlockIdentifier, StateOverride] ): # TODO: move to middleware if "from" not in transaction and is_checksum_address(self.default_account): transaction = assoc(transaction, "from", self.default_account) # TODO: move to middleware if block_identifier is None: block_identifier = self.default_block if state_override is None: return (transaction, block_identifier) else: return (transaction, block_identifier, state_override) def estimate_gas_munger( self, transaction: 
TxParams, block_identifier: BlockIdentifier | None = None, state_override: StateOverride | None = None, ) -> ( tuple[TxParams, BlockIdentifier] | tuple[TxParams, BlockIdentifier, StateOverride] ): return self._eth_call_and_estimate_gas_munger( transaction, block_identifier, state_override ) def get_block_munger( self, block_identifier: BlockIdentifier, full_transactions: bool = False ) -> tuple[BlockIdentifier, bool]: return (block_identifier, full_transactions) def block_id_munger( self, account: Address | ChecksumAddress | ENS, block_identifier: BlockIdentifier | None = None, ) -> tuple[Address | ChecksumAddress | ENS, BlockIdentifier]: if block_identifier is None: block_identifier = self.default_block return (account, block_identifier) def get_storage_at_munger( self, account: Address | ChecksumAddress | ENS, position: int, block_identifier: BlockIdentifier | None = None, ) -> tuple[Address | ChecksumAddress | ENS, int, BlockIdentifier]: if block_identifier is None: block_identifier = self.default_block return (account, position, block_identifier) def call_munger( self, transaction: TxParams, block_identifier: BlockIdentifier | None = None, state_override: StateOverride | None = None, ) -> ( tuple[TxParams, BlockIdentifier] | tuple[TxParams, BlockIdentifier, StateOverride] ): return self._eth_call_and_estimate_gas_munger( transaction, block_identifier, state_override ) def create_access_list_munger( self, transaction: TxParams, block_identifier: BlockIdentifier | None = None ) -> tuple[TxParams, BlockIdentifier]: # TODO: move to middleware if "from" not in transaction and is_checksum_address(self.default_account): transaction = assoc(transaction, "from", self.default_account) # TODO: move to middleware if block_identifier is None: block_identifier = self.default_block return (transaction, block_identifier) def sign_munger( self, account: Address | ChecksumAddress | ENS, data: int | bytes = None, hexstr: HexStr = None, text: str = None, ) -> tuple[Address | 
ChecksumAddress | ENS, HexStr]: message_hex = to_hex(data, hexstr=hexstr, text=text) return (account, message_hex) def filter_munger( self, filter_params: str | FilterParams | None = None, filter_id: HexStr | None = None, ) -> list[FilterParams] | list[HexStr] | list[str]: if filter_id and filter_params: raise Web3TypeError( "Ambiguous invocation: provide either a `filter_params` or a " "`filter_id` argument. Both were supplied." ) if isinstance(filter_params, dict): return [filter_params] elif is_string(filter_params): if filter_params in {"latest", "pending"}: return [filter_params] else: raise Web3ValueError( "The filter API only accepts the values of `pending` or " "`latest` for string based filters" ) elif filter_id and not filter_params: return [filter_id] else: raise Web3TypeError( "Must provide either filter_params as a string or " "a valid filter object, or a filter_id as a string " "or hex." )
BaseEth
python
Textualize__textual
examples/calculator.py
{ "start": 437, "end": 5928 }
class ____(App): """A working 'desktop' calculator.""" CSS_PATH = "calculator.tcss" numbers = var("0") show_ac = var(True) left = var(Decimal("0")) right = var(Decimal("0")) value = var("") operator = var("plus") # Maps button IDs on to the corresponding key name NAME_MAP = { "asterisk": "multiply", "slash": "divide", "underscore": "plus-minus", "full_stop": "point", "plus_minus_sign": "plus-minus", "percent_sign": "percent", "equals_sign": "equals", "minus": "minus", "plus": "plus", } def watch_numbers(self, value: str) -> None: """Called when numbers is updated.""" self.query_one("#numbers", Digits).update(value) def compute_show_ac(self) -> bool: """Compute switch to show AC or C button""" return self.value in ("", "0") and self.numbers == "0" def watch_show_ac(self, show_ac: bool) -> None: """Called when show_ac changes.""" self.query_one("#c").display = not show_ac self.query_one("#ac").display = show_ac def compose(self) -> ComposeResult: """Add our buttons.""" with Container(id="calculator"): yield Digits(id="numbers") yield Button("AC", id="ac", variant="primary") yield Button("C", id="c", variant="primary") yield Button("+/-", id="plus-minus", variant="primary") yield Button("%", id="percent", variant="primary") yield Button("÷", id="divide", variant="warning") yield Button("7", id="number-7", classes="number") yield Button("8", id="number-8", classes="number") yield Button("9", id="number-9", classes="number") yield Button("×", id="multiply", variant="warning") yield Button("4", id="number-4", classes="number") yield Button("5", id="number-5", classes="number") yield Button("6", id="number-6", classes="number") yield Button("-", id="minus", variant="warning") yield Button("1", id="number-1", classes="number") yield Button("2", id="number-2", classes="number") yield Button("3", id="number-3", classes="number") yield Button("+", id="plus", variant="warning") yield Button("0", id="number-0", classes="number") yield Button(".", id="point") yield Button("=", 
id="equals", variant="warning") def on_key(self, event: events.Key) -> None: """Called when the user presses a key.""" def press(button_id: str) -> None: """Press a button, should it exist.""" try: self.query_one(f"#{button_id}", Button).press() except NoMatches: pass key = event.key if key.isdecimal(): press(f"number-{key}") elif key == "c": press("c") press("ac") else: button_id = self.NAME_MAP.get(key) if button_id is not None: press(self.NAME_MAP.get(key, key)) @on(Button.Pressed, ".number") def number_pressed(self, event: Button.Pressed) -> None: """Pressed a number.""" assert event.button.id is not None number = event.button.id.partition("-")[-1] self.numbers = self.value = self.value.lstrip("0") + number @on(Button.Pressed, "#plus-minus") def plus_minus_pressed(self) -> None: """Pressed + / -""" self.numbers = self.value = str(Decimal(self.value or "0") * -1) @on(Button.Pressed, "#percent") def percent_pressed(self) -> None: """Pressed %""" self.numbers = self.value = str(Decimal(self.value or "0") / Decimal(100)) @on(Button.Pressed, "#point") def pressed_point(self) -> None: """Pressed .""" if "." not in self.value: self.numbers = self.value = (self.value or "0") + "." 
@on(Button.Pressed, "#ac") def pressed_ac(self) -> None: """Pressed AC""" self.value = "" self.left = self.right = Decimal(0) self.operator = "plus" self.numbers = "0" @on(Button.Pressed, "#c") def pressed_c(self) -> None: """Pressed C""" self.value = "" self.numbers = "0" def _do_math(self) -> None: """Does the math: LEFT OPERATOR RIGHT""" try: if self.operator == "plus": self.left += self.right elif self.operator == "minus": self.left -= self.right elif self.operator == "divide": self.left /= self.right elif self.operator == "multiply": self.left *= self.right self.numbers = str(self.left) self.value = "" except Exception: self.numbers = "Error" @on(Button.Pressed, "#plus,#minus,#divide,#multiply") def pressed_op(self, event: Button.Pressed) -> None: """Pressed one of the arithmetic operations.""" self.right = Decimal(self.value or "0") self._do_math() assert event.button.id is not None self.operator = event.button.id @on(Button.Pressed, "#equals") def pressed_equals(self) -> None: """Pressed =""" if self.value: self.right = Decimal(self.value) self._do_math() if __name__ == "__main__": CalculatorApp().run(inline=True)
CalculatorApp
python
walkccc__LeetCode
solutions/2751. Robot Collisions/2751.py
{ "start": 122, "end": 1232 }
class ____: def survivedRobotsHealths( self, positions: list[int], healths: list[int], directions: str, ) -> list[int]: robots = sorted([Robot(index, position, health, direction) for index, (position, health, direction) in enumerate(zip(positions, healths, directions))], key=lambda x: x.position) stack: list[Robot] = [] # running robots for robot in robots: if robot.direction == 'R': stack.append(robot) continue # Collide with robots going right if any. while stack and stack[-1].direction == 'R' and robot.health > 0: if stack[-1].health == robot.health: stack.pop() robot.health = 0 elif stack[-1].health < robot.health: stack.pop() robot.health -= 1 else: # stack[-1].health > robot.health stack[-1].health -= 1 robot.health = 0 if robot.health > 0: stack.append(robot) stack.sort(key=lambda robot: robot.index) return [robot.health for robot in stack]
Solution
python
kamyu104__LeetCode-Solutions
Python/binary-watch.py
{ "start": 30, "end": 732 }
class ____(object): def readBinaryWatch(self, num): """ :type num: int :rtype: List[str] """ def bit_count(bits): count = 0 while bits: bits &= bits-1 count += 1 return count return ['%d:%02d' % (h, m) for h in xrange(12) for m in xrange(60) if bit_count(h) + bit_count(m) == num] def readBinaryWatch2(self, num): """ :type num: int :rtype: List[str] """ return ['{0}:{1}'.format(str(h), str(m).zfill(2)) for h in range(12) for m in range(60) if (bin(h) + bin(m)).count('1') == num]
Solution
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 677450, "end": 719460 }
class ____(AnyMark): """ MarkDef schema wrapper. Parameters ---------- type : :class:`Mark`, Literal['arc', 'area', 'bar', 'image', 'line', 'point', 'rect', 'rule', 'text', 'tick', 'trail', 'circle', 'square', 'geoshape'] The mark type. This could a primitive mark type (one of ``"bar"``, ``"circle"``, ``"square"``, ``"tick"``, ``"line"``, ``"area"``, ``"point"``, ``"geoshape"``, ``"rule"``, and ``"text"``) or a composite mark type (``"boxplot"``, ``"errorband"``, ``"errorbar"``). align : dict, :class:`Align`, :class:`ExprRef`, Literal['left', 'center', 'right'] The horizontal alignment of the text or ranged marks (area, bar, image, rect, rule). One of ``"left"``, ``"right"``, ``"center"``. **Note:** Expression reference is *not* supported for range marks. angle : dict, float, :class:`ExprRef` The rotation angle of the text, in degrees. aria : bool, dict, :class:`ExprRef` A boolean flag indicating if `ARIA attributes <https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ should be included (SVG output only). If ``false``, the "aria-hidden" attribute will be set on the output SVG element, removing the mark item from the ARIA accessibility tree. ariaRole : str, dict, :class:`ExprRef` Sets the type of user interface element of the mark item for `ARIA accessibility <https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output only). If specified, this property determines the "role" attribute. Warning: this property is experimental and may be changed in the future. ariaRoleDescription : str, dict, :class:`ExprRef` A human-readable, author-localized description for the role of the mark item for `ARIA accessibility <https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output only). If specified, this property determines the "aria-roledescription" attribute. Warning: this property is experimental and may be changed in the future. aspect : bool, dict, :class:`ExprRef` Whether to keep aspect ratio of image marks. 
bandSize : float The width of the ticks. **Default value:** 3/4 of step (width step for horizontal ticks and height step for vertical ticks). baseline : dict, :class:`ExprRef`, :class:`Baseline`, :class:`TextBaseline`, Literal['alphabetic', 'line-bottom', 'line-top', 'top', 'middle', 'bottom'] For text marks, the vertical text baseline. One of ``"alphabetic"`` (default), ``"top"``, ``"middle"``, ``"bottom"``, ``"line-top"``, ``"line-bottom"``, or an expression reference that provides one of the valid values. The ``"line-top"`` and ``"line-bottom"`` values operate similarly to ``"top"`` and ``"bottom"``, but are calculated relative to the ``lineHeight`` rather than ``fontSize`` alone. For range marks, the vertical alignment of the marks. One of ``"top"``, ``"middle"``, ``"bottom"``. **Note:** Expression reference is *not* supported for range marks. binSpacing : float Offset between bars for binned field. The ideal value for this is either 0 (preferred by statisticians) or 1 (Vega-Lite default, D3 example style). **Default value:** ``1`` blend : dict, :class:`Blend`, :class:`ExprRef`, Literal[None, 'multiply', 'screen', 'overlay', 'darken', 'lighten', 'color-dodge', 'color-burn', 'hard-light', 'soft-light', 'difference', 'exclusion', 'hue', 'saturation', 'color', 'luminosity'] The color blend mode for drawing an item on its current background. Any valid `CSS mix-blend-mode <https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode>`__ value can be used. **Default value:** ``"source-over"`` clip : bool, dict, :class:`ExprRef` Whether a mark be clipped to the enclosing group's width and height. 
color : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple'] Default color. **Default value:** ``"#4682b4"`` **Note:** * This property cannot be used in a `style config <https://vega.github.io/vega-lite/docs/mark.html#style-config>`__. * The ``fill`` and ``stroke`` properties have higher precedence than ``color`` and will override ``color``. continuousBandSize : float The default size of the bars on continuous scales. **Default value:** ``5`` cornerRadius : dict, float, :class:`ExprRef` The radius in pixels of rounded rectangles or arcs' corners. **Default value:** ``0`` cornerRadiusBottomLeft : dict, float, :class:`ExprRef` The radius in pixels of rounded rectangles' bottom left corner. **Default value:** ``0`` cornerRadiusBottomRight : dict, float, :class:`ExprRef` The radius in pixels of rounded rectangles' bottom right corner. **Default value:** ``0`` cornerRadiusEnd : dict, float, :class:`ExprRef` * For vertical bars, top-left and top-right corner radius. * For horizontal bars, top-right and bottom-right corner radius. cornerRadiusTopLeft : dict, float, :class:`ExprRef` The radius in pixels of rounded rectangles' top right corner. **Default value:** ``0`` cornerRadiusTopRight : dict, float, :class:`ExprRef` The radius in pixels of rounded rectangles' top left corner. **Default value:** ``0`` cursor : dict, :class:`Cursor`, :class:`ExprRef`, Literal['auto', 'default', 'none', 'context-menu', 'help', 'pointer', 'progress', 'wait', 'cell', 'crosshair', 'text', 'vertical-text', 'alias', 'copy', 'move', 'no-drop', 'not-allowed', 'e-resize', 'n-resize', 'ne-resize', 'nw-resize', 's-resize', 'se-resize', 'sw-resize', 'w-resize', 'ew-resize', 'ns-resize', 'nesw-resize', 'nwse-resize', 'col-resize', 'row-resize', 'all-scroll', 'zoom-in', 'zoom-out', 'grab', 'grabbing'] The mouse cursor used over the mark. Any valid `CSS cursor type <https://developer.mozilla.org/en-US/docs/Web/CSS/cursor#Values>`__ can be used. 
description : str, dict, :class:`ExprRef` A text description of the mark item for `ARIA accessibility <https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output only). If specified, this property determines the `"aria-label" attribute <https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA/ARIA_Techniques/Using_the_aria-label_attribute>`__. dir : dict, :class:`ExprRef`, :class:`TextDirection`, Literal['ltr', 'rtl'] The direction of the text. One of ``"ltr"`` (left-to-right) or ``"rtl"`` (right-to-left). This property determines on which side is truncated in response to the limit parameter. **Default value:** ``"ltr"`` discreteBandSize : dict, float, :class:`RelativeBandSize` The default size of the bars with discrete dimensions. If unspecified, the default size is ``step-2``, which provides 2 pixel offset between bars. dx : dict, float, :class:`ExprRef` The horizontal offset, in pixels, between the text label and its anchor point. The offset is applied after rotation by the *angle* property. dy : dict, float, :class:`ExprRef` The vertical offset, in pixels, between the text label and its anchor point. The offset is applied after rotation by the *angle* property. ellipsis : str, dict, :class:`ExprRef` The ellipsis string for text truncated in response to the limit parameter. 
**Default value:** ``"…"`` fill : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 
'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple'], None Default fill color. This property has higher precedence than ``config.color``. Set to ``null`` to remove fill. **Default value:** (None) fillOpacity : dict, float, :class:`ExprRef` The fill opacity (value between [0,1]). **Default value:** ``1`` filled : bool Whether the mark's color should be used as fill color instead of stroke color. **Default value:** ``false`` for all ``point``, ``line``, and ``rule`` marks as well as ``geoshape`` marks for `graticule <https://vega.github.io/vega-lite/docs/data.html#graticule>`__ data sources; otherwise, ``true``. **Note:** This property cannot be used in a `style config <https://vega.github.io/vega-lite/docs/mark.html#style-config>`__. font : str, dict, :class:`ExprRef` The typeface to set the text in (e.g., ``"Helvetica Neue"``). fontSize : dict, float, :class:`ExprRef` The font size, in pixels. **Default value:** ``11`` fontStyle : str, dict, :class:`ExprRef`, :class:`FontStyle` The font style (e.g., ``"italic"``). fontWeight : dict, :class:`ExprRef`, :class:`FontWeight`, Literal['normal', 'bold', 'lighter', 'bolder', 100, 200, 300, 400, 500, 600, 700, 800, 900] The font weight. This can be either a string (e.g ``"bold"``, ``"normal"``) or a number (``100``, ``200``, ``300``, ..., ``900`` where ``"normal"`` = ``400`` and ``"bold"`` = ``700``). height : dict, float, :class:`ExprRef`, :class:`RelativeBandSize` Height of the marks. One of: * A number representing a fixed pixel height. * A relative band size definition. For example, ``{band: 0.5}`` represents half of the band href : str, dict, :class:`URI`, :class:`ExprRef` A URL to load upon mouse click. If defined, the mark acts as a hyperlink. innerRadius : dict, float, :class:`ExprRef` The inner radius in pixels of arc marks. ``innerRadius`` is an alias for ``radius2``. 
**Default value:** ``0`` interpolate : dict, :class:`ExprRef`, :class:`Interpolate`, Literal['basis', 'basis-open', 'basis-closed', 'bundle', 'cardinal', 'cardinal-open', 'cardinal-closed', 'catmull-rom', 'linear', 'linear-closed', 'monotone', 'natural', 'step', 'step-before', 'step-after'] The line interpolation method to use for line and area marks. One of the following: * ``"linear"``: piecewise linear segments, as in a polyline. * ``"linear-closed"``: close the linear segments to form a polygon. * ``"step"``: alternate between horizontal and vertical segments, as in a step function. * ``"step-before"``: alternate between vertical and horizontal segments, as in a step function. * ``"step-after"``: alternate between horizontal and vertical segments, as in a step function. * ``"basis"``: a B-spline, with control point duplication on the ends. * ``"basis-open"``: an open B-spline; may not intersect the start or end. * ``"basis-closed"``: a closed B-spline, as in a loop. * ``"cardinal"``: a Cardinal spline, with control point duplication on the ends. * ``"cardinal-open"``: an open Cardinal spline; may not intersect the start or end, but will intersect other control points. * ``"cardinal-closed"``: a closed Cardinal spline, as in a loop. * ``"bundle"``: equivalent to basis, except the tension parameter is used to straighten the spline. * ``"monotone"``: cubic interpolation that preserves monotonicity in y. invalid : :class:`MarkInvalidDataMode`, Literal['filter', 'break-paths-filter-domains', 'break-paths-show-domains', 'break-paths-show-path-domains', 'show'], None Invalid data mode, which defines how the marks and corresponding scales should represent invalid values (``null`` and ``NaN`` in continuous scales *without* defined output for invalid values). * ``"filter"`` — *Exclude* all invalid values from the visualization's *marks* and *scales*. 
For path marks (for line, area, trail), this option will create paths that connect valid points, as if the data rows with invalid values do not exist. * ``"break-paths-filter-domains"`` — Break path marks (for line, area, trail) at invalid values. For non-path marks, this is equivalent to ``"filter"``. All *scale* domains will *exclude* these filtered data points. * ``"break-paths-show-domains"`` — Break paths (for line, area, trail) at invalid values. Hide invalid values for non-path marks. All *scale* domains will *include* these filtered data points (for both path and non-path marks). * ``"show"`` or ``null`` — Show all data points in the marks and scale domains. Each scale will use the output for invalid values defined in ``config.scale.invalid`` or, if unspecified, by default invalid values will produce the same visual values as zero (if the scale includes zero) or the minimum value (if the scale does not include zero). * ``"break-paths-show-path-domains"`` (default) — This is equivalent to ``"break-paths-show-domains"`` for path-based marks (line/area/trail) and ``"filter"`` for non-path marks. **Note**: If any channel's scale has an output for invalid values defined in ``config.scale.invalid``, all values for the scales will be considered "valid" since they can produce a reasonable output for the scales. Thus, fields for such channels will not be filtered and will not cause path breaks. limit : dict, float, :class:`ExprRef` The maximum length of the text mark in pixels. The text value will be automatically truncated if the rendered size exceeds the limit. **Default value:** ``0`` -- indicating no limit line : bool, dict, :class:`OverlayMarkDef` A flag for overlaying line on top of area marks, or an object defining the properties of the overlayed lines. * If this value is an empty object (``{}``) or ``true``, lines with default properties will be used. * If this value is ``false``, no lines would be automatically added to area marks. 
**Default value:** ``false``. lineBreak : str, dict, :class:`ExprRef` A delimiter, such as a newline character, upon which to break text strings into multiple lines. This property is ignored if the text is array-valued. lineHeight : dict, float, :class:`ExprRef` The line height in pixels (the spacing between subsequent lines of text) for multi-line text marks. minBandSize : dict, float, :class:`ExprRef` The minimum band size for bar and rectangle marks. **Default value:** ``0.25`` opacity : dict, float, :class:`ExprRef` The overall opacity (value between [0,1]). **Default value:** ``0.7`` for non-aggregate plots with ``point``, ``tick``, ``circle``, or ``square`` marks or layered ``bar`` charts and ``1`` otherwise. order : bool, None For line and trail marks, this ``order`` property can be set to ``null`` or ``false`` to make the lines use the original order in the data sources. orient : :class:`Orientation`, Literal['horizontal', 'vertical'] The orientation of a non-stacked bar, tick, area, and line charts. The value is either horizontal (default) or vertical. * For bar, rule and tick, this determines whether the size of the bar and tick should be applied to x or y dimension. * For area, this property determines the orient property of the Vega output. * For line and trail marks, this property determines the sort order of the points in the line if ``config.sortLineBy`` is not specified. For stacked charts, this is always determined by the orientation of the stack; therefore explicitly specified value will be ignored. outerRadius : dict, float, :class:`ExprRef` The outer radius in pixels of arc marks. ``outerRadius`` is an alias for ``radius``. **Default value:** ``0`` padAngle : dict, float, :class:`ExprRef` The angular padding applied to sides of the arc, in radians. point : bool, dict, Literal['transparent'], :class:`OverlayMarkDef` A flag for overlaying points on top of line or area marks, or an object defining the properties of the overlayed points. 
* If this property is ``"transparent"``, transparent points will be used (for enhancing tooltips and selections). * If this property is an empty object (``{}``) or ``true``, filled points with default properties will be used. * If this property is ``false``, no points would be automatically added to line or area marks. **Default value:** ``false``. radius : dict, float, :class:`ExprRef` For arc mark, the primary (outer) radius in pixels. For text marks, polar coordinate radial offset, in pixels, of the text from the origin determined by the ``x`` and ``y`` properties. **Default value:** ``min(plot_width, plot_height)/2`` radius2 : dict, float, :class:`ExprRef` The secondary (inner) radius in pixels of arc marks. **Default value:** ``0`` radius2Offset : dict, float, :class:`ExprRef` Offset for radius2. radiusOffset : dict, float, :class:`ExprRef` Offset for radius. shape : str, dict, :class:`ExprRef`, :class:`SymbolShape` Shape of the point marks. Supported values include: * plotting shapes: ``"circle"``, ``"square"``, ``"cross"``, ``"diamond"``, ``"triangle-up"``, ``"triangle-down"``, ``"triangle-right"``, or ``"triangle-left"``. * the line symbol ``"stroke"`` * centered directional shapes ``"arrow"``, ``"wedge"``, or ``"triangle"`` * a custom `SVG path string <https://developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths>`__ (For correct sizing, custom shape paths should be defined within a square bounding box with coordinates ranging from -1 to 1 along both the x and y dimensions.) **Default value:** ``"circle"`` size : dict, float, :class:`ExprRef` Default size for marks. * For ``point``/``circle``/``square``, this represents the pixel area of the marks. Note that this value sets the area of the symbol; the side lengths will increase with the square root of this value. * For ``bar``, this represents the band size of the bar, in pixels. * For ``text``, this represents the font size, in pixels. 
**Default value:** * ``30`` for point, circle, square marks; width/height's ``step`` * ``2`` for bar marks with discrete dimensions; * ``5`` for bar marks with continuous dimensions; * ``11`` for text marks. smooth : bool, dict, :class:`ExprRef` A boolean flag (default true) indicating if the image should be smoothed when resized. If false, individual pixels should be scaled directly rather than interpolated with smoothing. For SVG rendering, this option may not work in some browsers due to lack of standardization. stroke : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 
'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple'], None Default stroke color. This property has higher precedence than ``config.color``. Set to ``null`` to remove stroke. **Default value:** (None) strokeCap : dict, :class:`ExprRef`, :class:`StrokeCap`, Literal['butt', 'round', 'square'] The stroke cap for line ending style. One of ``"butt"``, ``"round"``, or ``"square"``. **Default value:** ``"butt"`` strokeDash : dict, Sequence[float], :class:`ExprRef` An array of alternating stroke, space lengths for creating dashed or dotted lines. strokeDashOffset : dict, float, :class:`ExprRef` The offset (in pixels) into which to begin drawing with the stroke dash array. strokeJoin : dict, :class:`ExprRef`, :class:`StrokeJoin`, Literal['miter', 'round', 'bevel'] The stroke line join method. One of ``"miter"``, ``"round"`` or ``"bevel"``. **Default value:** ``"miter"`` strokeMiterLimit : dict, float, :class:`ExprRef` The miter limit at which to bevel a line join. strokeOffset : dict, float, :class:`ExprRef` The offset in pixels at which to draw the group stroke and fill. If unspecified, the default behavior is to dynamically offset stroked groups such that 1 pixel stroke widths align with the pixel grid. strokeOpacity : dict, float, :class:`ExprRef` The stroke opacity (value between [0,1]). **Default value:** ``1`` strokeWidth : dict, float, :class:`ExprRef` The stroke width, in pixels. 
style : str, Sequence[str] A string or array of strings indicating the name of custom styles to apply to the mark. A style is a named collection of mark property defaults defined within the `style configuration <https://vega.github.io/vega-lite/docs/mark.html#style-config>`__. If style is an array, later styles will override earlier styles. Any `mark properties <https://vega.github.io/vega-lite/docs/encoding.html#mark-prop>`__ explicitly defined within the ``encoding`` will override a style default. **Default value:** The mark's name. For example, a bar mark will have style ``"bar"`` by default. **Note:** Any specified style will augment the default style. For example, a bar mark with ``"style": "foo"`` will receive from ``config.style.bar`` and ``config.style.foo`` (the specified style ``"foo"`` has higher precedence). tension : dict, float, :class:`ExprRef` Depending on the interpolation type, sets the tension parameter (for line and area marks). text : str, dict, :class:`Text`, Sequence[str], :class:`ExprRef` Placeholder text if the ``text`` channel is not specified theta : dict, float, :class:`ExprRef` * For arc marks, the arc length in radians if theta2 is not specified, otherwise the start arc angle. (A value of 0 indicates up or “north”, increasing values proceed clockwise.) * For text marks, polar coordinate angle in radians. theta2 : dict, float, :class:`ExprRef` The end angle of arc marks in radians. A value of 0 indicates up or “north”, increasing values proceed clockwise. theta2Offset : dict, float, :class:`ExprRef` Offset for theta2. thetaOffset : dict, float, :class:`ExprRef` Offset for theta. thickness : float Thickness of the tick mark. **Default value:** ``1`` time : dict, float, :class:`ExprRef` timeUnitBandPosition : float Default relative band position for a time unit. If set to ``0``, the marks will be positioned at the beginning of the time unit band step. 
If set to ``0.5``, the marks will be positioned in the middle of the time unit band step. timeUnitBandSize : float Default relative band size for a time unit. If set to ``1``, the bandwidth of the marks will be equal to the time unit band step. If set to ``0.5``, bandwidth of the marks will be half of the time unit band step. tooltip : str, bool, dict, float, :class:`ExprRef`, :class:`TooltipContent`, None The tooltip text string to show upon mouse hover or an object defining which fields should the tooltip be derived from. * If ``tooltip`` is ``true`` or ``{"content": "encoding"}``, then all fields from ``encoding`` will be used. * If ``tooltip`` is ``{"content": "data"}``, then all fields that appear in the highlighted data point will be used. * If set to ``null`` or ``false``, then no tooltip will be used. See the `tooltip <https://vega.github.io/vega-lite/docs/tooltip.html>`__ documentation for a detailed discussion about tooltip in Vega-Lite. **Default value:** ``null`` url : str, dict, :class:`URI`, :class:`ExprRef` The URL of the image file for image marks. width : dict, float, :class:`ExprRef`, :class:`RelativeBandSize` Width of the marks. One of: * A number representing a fixed pixel width. * A relative band size definition. For example, ``{band: 0.5}`` represents half of the band. x : dict, float, :class:`ExprRef`, Literal['width'] X coordinates of the marks, or width of horizontal ``"bar"`` and ``"area"`` without specified ``x2`` or ``width``. The ``value`` of this channel can be a number or a string ``"width"`` for the width of the plot. x2 : dict, float, :class:`ExprRef`, Literal['width'] X2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``. The ``value`` of this channel can be a number or a string ``"width"`` for the width of the plot. x2Offset : dict, float, :class:`ExprRef` Offset for x2-position. xOffset : dict, float, :class:`ExprRef` Offset for x-position. 
y : dict, float, :class:`ExprRef`, Literal['height'] Y coordinates of the marks, or height of vertical ``"bar"`` and ``"area"`` without specified ``y2`` or ``height``. The ``value`` of this channel can be a number or a string ``"height"`` for the height of the plot. y2 : dict, float, :class:`ExprRef`, Literal['height'] Y2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``. The ``value`` of this channel can be a number or a string ``"height"`` for the height of the plot. y2Offset : dict, float, :class:`ExprRef` Offset for y2-position. yOffset : dict, float, :class:`ExprRef` Offset for y-position. """ _schema = {"$ref": "#/definitions/MarkDef"} def __init__( self, type: Optional[SchemaBase | Mark_T] = Undefined, align: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined, angle: Optional[float | Parameter | SchemaBase | Map] = Undefined, aria: Optional[bool | Parameter | SchemaBase | Map] = Undefined, ariaRole: Optional[str | Parameter | SchemaBase | Map] = Undefined, ariaRoleDescription: Optional[str | Parameter | SchemaBase | Map] = Undefined, aspect: Optional[bool | Parameter | SchemaBase | Map] = Undefined, bandSize: Optional[float] = Undefined, baseline: Optional[Parameter | SchemaBase | Map | TextBaseline_T] = Undefined, binSpacing: Optional[float] = Undefined, blend: Optional[Parameter | SchemaBase | Map | Blend_T] = Undefined, clip: Optional[bool | Parameter | SchemaBase | Map] = Undefined, color: Optional[str | Parameter | SchemaBase | Map | ColorName_T] = Undefined, continuousBandSize: Optional[float] = Undefined, cornerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined, cornerRadiusBottomLeft: Optional[ float | Parameter | SchemaBase | Map ] = Undefined, cornerRadiusBottomRight: Optional[ float | Parameter | SchemaBase | Map ] = Undefined, cornerRadiusEnd: Optional[float | Parameter | SchemaBase | Map] = Undefined, cornerRadiusTopLeft: Optional[float | Parameter | SchemaBase | Map] = Undefined, 
cornerRadiusTopRight: Optional[ float | Parameter | SchemaBase | Map ] = Undefined, cursor: Optional[Parameter | SchemaBase | Map | Cursor_T] = Undefined, description: Optional[str | Parameter | SchemaBase | Map] = Undefined, dir: Optional[Parameter | SchemaBase | Map | TextDirection_T] = Undefined, discreteBandSize: Optional[float | SchemaBase | Map] = Undefined, dx: Optional[float | Parameter | SchemaBase | Map] = Undefined, dy: Optional[float | Parameter | SchemaBase | Map] = Undefined, ellipsis: Optional[str | Parameter | SchemaBase | Map] = Undefined, fill: Optional[ str | Parameter | SchemaBase | Map | ColorName_T | None ] = Undefined, fillOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined, filled: Optional[bool] = Undefined, font: Optional[str | Parameter | SchemaBase | Map] = Undefined, fontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined, fontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined, fontWeight: Optional[Parameter | SchemaBase | Map | FontWeight_T] = Undefined, height: Optional[float | Parameter | SchemaBase | Map] = Undefined, href: Optional[str | Parameter | SchemaBase | Map] = Undefined, innerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined, interpolate: Optional[Parameter | SchemaBase | Map | Interpolate_T] = Undefined, invalid: Optional[SchemaBase | MarkInvalidDataMode_T | None] = Undefined, limit: Optional[float | Parameter | SchemaBase | Map] = Undefined, line: Optional[bool | SchemaBase | Map] = Undefined, lineBreak: Optional[str | Parameter | SchemaBase | Map] = Undefined, lineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined, minBandSize: Optional[float | Parameter | SchemaBase | Map] = Undefined, opacity: Optional[float | Parameter | SchemaBase | Map] = Undefined, order: Optional[bool | None] = Undefined, orient: Optional[SchemaBase | Orientation_T] = Undefined, outerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined, padAngle: 
Optional[float | Parameter | SchemaBase | Map] = Undefined, point: Optional[bool | SchemaBase | Literal["transparent"] | Map] = Undefined, radius: Optional[float | Parameter | SchemaBase | Map] = Undefined, radius2: Optional[float | Parameter | SchemaBase | Map] = Undefined, radius2Offset: Optional[float | Parameter | SchemaBase | Map] = Undefined, radiusOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined, shape: Optional[str | Parameter | SchemaBase | Map] = Undefined, size: Optional[float | Parameter | SchemaBase | Map] = Undefined, smooth: Optional[bool | Parameter | SchemaBase | Map] = Undefined, stroke: Optional[ str | Parameter | SchemaBase | Map | ColorName_T | None ] = Undefined, strokeCap: Optional[Parameter | SchemaBase | Map | StrokeCap_T] = Undefined, strokeDash: Optional[ Parameter | SchemaBase | Sequence[float] | Map ] = Undefined, strokeDashOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined, strokeJoin: Optional[Parameter | SchemaBase | Map | StrokeJoin_T] = Undefined, strokeMiterLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined, strokeOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined, strokeOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined, strokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined, style: Optional[str | Sequence[str]] = Undefined, tension: Optional[float | Parameter | SchemaBase | Map] = Undefined, text: Optional[str | Parameter | SchemaBase | Sequence[str] | Map] = Undefined, theta: Optional[float | Parameter | SchemaBase | Map] = Undefined, theta2: Optional[float | Parameter | SchemaBase | Map] = Undefined, theta2Offset: Optional[float | Parameter | SchemaBase | Map] = Undefined, thetaOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined, thickness: Optional[float] = Undefined, time: Optional[float | Parameter | SchemaBase | Map] = Undefined, timeUnitBandPosition: Optional[float] = Undefined, timeUnitBandSize: 
Optional[float] = Undefined, tooltip: Optional[ str | bool | float | Parameter | SchemaBase | Map | None ] = Undefined, url: Optional[str | Parameter | SchemaBase | Map] = Undefined, width: Optional[float | Parameter | SchemaBase | Map] = Undefined, x: Optional[ float | Parameter | SchemaBase | Literal["width"] | Map ] = Undefined, x2: Optional[ float | Parameter | SchemaBase | Literal["width"] | Map ] = Undefined, x2Offset: Optional[float | Parameter | SchemaBase | Map] = Undefined, xOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined, y: Optional[ float | Parameter | SchemaBase | Literal["height"] | Map ] = Undefined, y2: Optional[ float | Parameter | SchemaBase | Literal["height"] | Map ] = Undefined, y2Offset: Optional[float | Parameter | SchemaBase | Map] = Undefined, yOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined, **kwds, ): super().__init__( type=type, align=align, angle=angle, aria=aria, ariaRole=ariaRole, ariaRoleDescription=ariaRoleDescription, aspect=aspect, bandSize=bandSize, baseline=baseline, binSpacing=binSpacing, blend=blend, clip=clip, color=color, continuousBandSize=continuousBandSize, cornerRadius=cornerRadius, cornerRadiusBottomLeft=cornerRadiusBottomLeft, cornerRadiusBottomRight=cornerRadiusBottomRight, cornerRadiusEnd=cornerRadiusEnd, cornerRadiusTopLeft=cornerRadiusTopLeft, cornerRadiusTopRight=cornerRadiusTopRight, cursor=cursor, description=description, dir=dir, discreteBandSize=discreteBandSize, dx=dx, dy=dy, ellipsis=ellipsis, fill=fill, fillOpacity=fillOpacity, filled=filled, font=font, fontSize=fontSize, fontStyle=fontStyle, fontWeight=fontWeight, height=height, href=href, innerRadius=innerRadius, interpolate=interpolate, invalid=invalid, limit=limit, line=line, lineBreak=lineBreak, lineHeight=lineHeight, minBandSize=minBandSize, opacity=opacity, order=order, orient=orient, outerRadius=outerRadius, padAngle=padAngle, point=point, radius=radius, radius2=radius2, radius2Offset=radius2Offset, 
radiusOffset=radiusOffset, shape=shape, size=size, smooth=smooth, stroke=stroke, strokeCap=strokeCap, strokeDash=strokeDash, strokeDashOffset=strokeDashOffset, strokeJoin=strokeJoin, strokeMiterLimit=strokeMiterLimit, strokeOffset=strokeOffset, strokeOpacity=strokeOpacity, strokeWidth=strokeWidth, style=style, tension=tension, text=text, theta=theta, theta2=theta2, theta2Offset=theta2Offset, thetaOffset=thetaOffset, thickness=thickness, time=time, timeUnitBandPosition=timeUnitBandPosition, timeUnitBandSize=timeUnitBandSize, tooltip=tooltip, url=url, width=width, x=x, x2=x2, x2Offset=x2Offset, xOffset=xOffset, y=y, y2=y2, y2Offset=y2Offset, yOffset=yOffset, **kwds, )
MarkDef
python
lazyprogrammer__machine_learning_examples
rl3/a2c/atari_wrappers.py
{ "start": 6104, "end": 7030 }
class ____(gym.Wrapper): def __init__(self, env, k): """Stack k last frames. Returns lazy array, which is much more memory efficient. See Also -------- baselines.common.atari_wrappers.LazyFrames """ gym.Wrapper.__init__(self, env) self.k = k self.frames = deque([], maxlen=k) shp = env.observation_space.shape self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k)) def reset(self): ob = self.env.reset() for _ in range(self.k): self.frames.append(ob) return self._get_ob() def step(self, action): ob, reward, done, info = self.env.step(action) self.frames.append(ob) return self._get_ob(), reward, done, info def _get_ob(self): assert len(self.frames) == self.k return LazyFrames(list(self.frames))
FrameStack
python
joke2k__faker
faker/providers/phone_number/en_PH/__init__.py
{ "start": 67, "end": 8227 }
class ____(BaseProvider): """ Provider for Philippine mobile and landline telephone numbers This provider has methods that generate phone numbers specific to service providers whenever applicable, because the kinds of services, the quality of said services, and even the fees may vary depending on the service provider and the service location. This in turn, affects subscriber behavior, e.g. someone with a SIM from company X may be very unlikely to respond to calls and texts sent from a company Y SIM as the service charge might be more expensive. The provider methods are there to enable the creation of more "realistic" fake data for such cases. Additional Notes: - The Philippine telecommunication industry is dominated by the Globe-PLDT duopoly. Globe offers landline services under the Globe brand and mobile services under the Globe and TM brands. PLDT offers landline services under the PLDT brand, and its subsidiaries offer mobile services under the Smart, TNT, and SUN brands. The rest of the industry is shared by smaller players, and Bayantel is one of the more well-known players that provide landline services. - Globe mobile prefixes include both Globe and TM brands, and the Smart mobile prefixes include both Smart and TNT brands but not the SUN brand. Available sources only split the prefixes this way. - In October 2019, Area 2 landline numbers were migrated to an 8 digit scheme, while the rest of the country still uses the original 7 digit scheme. Area 2 is comprised of the whole National Capital Region (aka Metro Manila) and parts of surrounding provinces, and within this area, the service provider's identifier is included in every 8 digit landline number. Sources: - https://en.wikipedia.org/wiki/Telephone_numbers_in_the_Philippines - https://www.prefix.ph/prefixes/2019-updated-complete-list-of-philippine-mobile-network-prefixes/ - https://powerpinoys.com/network-prefixes-philippines/ """ globe_mobile_number_prefixes: Tuple[str, ...] 
= ( "817", "904", "905", "906", "915", "916", "917", "926", "927", "935", "936", "937", "945", "955", "956", "965", "966", "967", "973", "975", "976", "977", "978", "979", "994", "995", "996", "997", ) smart_mobile_number_prefixes: Tuple[str, ...] = ( "813", "907", "908", "909", "910", "911", "912", "913", "914", "918", "919", "920", "921", "928", "929", "930", "938", "939", "940", "946", "947", "948", "949", "950", "951", "961", "970", "981", "989", "992", "998", "999", ) sun_mobile_number_prefixes: Tuple[str, ...] = ( "922", "923", "924", "925", "931", "932", "933", "934", "941", "942", "943", "944", ) globe_mobile_number_formats: Tuple[str, ...] = ( "0{{globe_mobile_number_prefix}}-###-####", "+63{{globe_mobile_number_prefix}}-###-####", ) smart_mobile_number_formats: Tuple[str, ...] = ( "0{{smart_mobile_number_prefix}}-###-####", "+63{{smart_mobile_number_prefix}}-###-####", ) sun_mobile_number_formats: Tuple[str, ...] = ( "0{{sun_mobile_number_prefix}}-###-####", "+63{{sun_mobile_number_prefix}}-###-####", ) mobile_number_formats: Tuple[str, ...] = ( globe_mobile_number_formats + smart_mobile_number_formats + sun_mobile_number_formats ) bayantel_landline_identifiers: Tuple[str, ...] = tuple(str(x) for x in range(3000, 3500)) misc_landline_identifiers: Tuple[str, ...] = tuple(str(x) for x in range(5300, 5800)) + tuple( str(x) for x in range(6000, 6700) ) non_area2_landline_area_codes: Tuple[str, ...] = ( "32", "33", "34", "35", "36", "38", "42", "43", "44", "45", "46", "47", "48", "49", "52", "53", "54", "55", "56", "62", "63", "64", "65", "68", "72", "74", "75", "77", "78", "82", "83", "84", "85", "86", "87", "88", ) globe_area2_landline_number_formats: Tuple[str, ...] = ( "02-7###-####", "+632-7###-####", ) pldt_area2_landline_number_formats: Tuple[str, ...] = ( "02-8###-####", "+632-8###-####", ) bayantel_area2_landline_number_formats: Tuple[str, ...] 
= ( "02-{{bayantel_landline_identifier}}-####", "+632-{{bayantel_landline_identifier}}-####", ) misc_area2_landline_number_formats: Tuple[str, ...] = ( "02-{{misc_landline_identifier}}-####", "+632-{{misc_landline_identifier}}-####", ) area2_landline_number_formats: Tuple[str, ...] = ( globe_area2_landline_number_formats + pldt_area2_landline_number_formats + bayantel_area2_landline_number_formats + misc_area2_landline_number_formats ) non_area2_landline_number_formats: Tuple[str, ...] = ( "0{{non_area2_landline_area_code}}-###-####", "+63{{non_area2_landline_area_code}}-###-####", ) landline_number_formats: Tuple[str, ...] = area2_landline_number_formats + non_area2_landline_number_formats def _create_phone_number(self, formats: Sequence[str]) -> str: pattern: str = self.random_element(formats) return self.numerify(self.generator.parse(pattern)) def globe_mobile_number_prefix(self) -> str: return self.random_element(self.globe_mobile_number_prefixes) def smart_mobile_number_prefix(self) -> str: return self.random_element(self.smart_mobile_number_prefixes) def sun_mobile_number_prefix(self) -> str: return self.random_element(self.sun_mobile_number_prefixes) def bayantel_landline_identifier(self) -> str: return self.random_element(self.bayantel_landline_identifiers) def misc_landline_identifier(self) -> str: return self.random_element(self.misc_landline_identifiers) def non_area2_landline_area_code(self) -> str: return self.random_element(self.non_area2_landline_area_codes) def globe_mobile_number(self) -> str: return self._create_phone_number(self.globe_mobile_number_formats) def smart_mobile_number(self) -> str: return self._create_phone_number(self.smart_mobile_number_formats) def sun_mobile_number(self) -> str: return self._create_phone_number(self.sun_mobile_number_formats) def mobile_number(self) -> str: return self._create_phone_number(self.mobile_number_formats) def globe_area2_landline_number(self) -> str: return 
self._create_phone_number(self.globe_area2_landline_number_formats) def pldt_area2_landline_number(self) -> str: return self._create_phone_number(self.pldt_area2_landline_number_formats) def bayantel_area2_landline_number(self) -> str: return self._create_phone_number(self.bayantel_area2_landline_number_formats) def misc_area2_landline_number(self) -> str: return self._create_phone_number(self.misc_area2_landline_number_formats) def area2_landline_number(self) -> str: return self._create_phone_number(self.area2_landline_number_formats) def non_area2_landline_number(self) -> str: return self._create_phone_number(self.non_area2_landline_number_formats) def landline_number(self) -> str: return self._create_phone_number(self.landline_number_formats)
Provider
python
joke2k__faker
tests/providers/test_bank.py
{ "start": 3440, "end": 4088 }
class ____: """Test no_NO bank provider""" def test_aba(self, faker, num_samples): for _ in range(num_samples): aba = faker.aba() assert len(aba) == 9 assert is_valid_aba(aba) def test_bban(self, faker, num_samples): for _ in range(num_samples): assert re.fullmatch(r"\d{11}", faker.bban()) def test_iban(self, faker, num_samples): for _ in range(num_samples): iban = faker.iban() assert is_valid_iban(iban) assert iban[:2] == NoNoBankProvider.country_code assert re.fullmatch(r"\d{2}\d{11}", iban[2:])
TestNoNo
python
tensorflow__tensorflow
tensorflow/core/function/trace_type/trace_type_test.py
{ "start": 2113, "end": 2212 }
class ____: """Helps test memory leaks for GenericType.""" @dataclasses.dataclass
DummyGenericClass
python
automl__auto-sklearn
autosklearn/pipeline/components/data_preprocessing/rescaling/__init__.py
{ "start": 1056, "end": 3397 }
class ____(AutoSklearnChoice): @classmethod def get_components(cls: BaseEstimator) -> Dict[str, BaseEstimator]: components: Dict[str, BaseEstimator] = OrderedDict() components.update(_rescalers) components.update(additional_components.components) return components def get_hyperparameter_search_space( self, feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None, default: Optional[str] = None, include: Optional[Dict[str, str]] = None, exclude: Optional[Dict[str, str]] = None, ) -> ConfigurationSpace: cs = ConfigurationSpace() if dataset_properties is None: dataset_properties = {} # Compile a list of legal preprocessors for this problem available_preprocessors = self.get_available_components( dataset_properties=dataset_properties, include=include, exclude=exclude ) if len(available_preprocessors) == 0: raise ValueError("No rescalers found, please add any rescaling component.") if default is None: defaults = ["standardize", "none", "minmax", "normalize"] for default_ in defaults: if default_ in available_preprocessors: default = default_ break preprocessor = CategoricalHyperparameter( "__choice__", list(available_preprocessors.keys()), default_value=default ) cs.add_hyperparameter(preprocessor) for name in available_preprocessors: preprocessor_configuration_space = available_preprocessors[ name ].get_hyperparameter_search_space( feat_type=feat_type, dataset_properties=dataset_properties ) parent_hyperparameter = {"parent": preprocessor, "value": name} cs.add_configuration_space( name, preprocessor_configuration_space, parent_hyperparameter=parent_hyperparameter, ) self.configuration_space = cs self.dataset_properties = dataset_properties return cs def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE: return self.choice.transform(X)
RescalingChoice
python
ZoranPandovski__al-go-rithms
data_structures/Stack/Python/Stack.py
{ "start": 106, "end": 648 }
class ____(object): def __init__(self): # Initialize stack as empty array self.stack = [] # Return and remove the last element of the stack array. def pop(self): # If the stack is not empty, pop. if len(self.stack) > 0: return self.stack.pop() # Add an element to the end of the stack array. def push(self, element): self.stack.append(element) # Return the last element of the stack array (without removing it). def peek(self): return self.stack[-1]
Stack
python
ray-project__ray
ci/ray_ci/doc/mock/mock_module.py
{ "start": 616, "end": 1038 }
class ____: """ This class is used for testing purpose only. It should not be used in production. """ pass @Deprecated def mock_function(): """ This function is used for testing purpose only. It should not be used in production. """ pass @PublicAPI def mock_w00t(): """ This function is used for testing purpose only. It should not be used in production. """ pass
MockClass
python
facebook__pyre-check
client/configuration/search_path.py
{ "start": 4155, "end": 9003 }
class ____(RawElement): package_name: str is_toplevel_module: bool = False def package_path(self) -> str: module_suffix = ".py" if self.is_toplevel_module else "" return self.package_name + module_suffix def expand_global_root(self, global_root: str) -> "SitePackageRawElement": # Site package does not participate in root expansion. return self def expand_relative_root(self, relative_root: str) -> "SitePackageRawElement": # Site package does not participate in root expansion. return self def expand_glob(self) -> List["RawElement"]: return [self] def to_element(self, site_root: str) -> SitePackageElement: return SitePackageElement(site_root, self.package_name, self.is_toplevel_module) def create_raw_element(json: Union[str, Dict[str, object]]) -> RawElement: if isinstance(json, str): return SimpleRawElement(json) elif isinstance(json, dict): def assert_string_item(input: Dict[str, object], name: str) -> str: value = input.get(name, None) if not isinstance(value, str): raise exceptions.InvalidConfiguration( "Invalid search path element. " f"Expected item `{name}` to be a string but got {value}" ) return value if "root" in json and "subdirectory" in json: return SubdirectoryRawElement( root=assert_string_item(json, "root"), subdirectory=assert_string_item(json, "subdirectory"), ) elif "import_root" in json and "source" in json: return SubdirectoryRawElement( root=assert_string_item(json, "import_root"), subdirectory=assert_string_item(json, "source"), ) elif "site-package" in json: is_toplevel_module = ( "is_toplevel_module" in json and json["is_toplevel_module"] ) if not isinstance(is_toplevel_module, bool): raise exceptions.InvalidConfiguration( "Invalid search path element. 
" "Expected `is_toplevel_module` to be a boolean but " f"got {is_toplevel_module}" ) return SitePackageRawElement( package_name=assert_string_item(json, "site-package"), is_toplevel_module=bool(is_toplevel_module), ) raise exceptions.InvalidConfiguration( f"Invalid JSON format for search path element: {json}" ) def process_raw_elements( raw_elements: Iterable[RawElement], site_roots: Sequence[str], required: bool = False, ) -> List[Element]: elements: List[Element] = [] def add_if_exists(element: Element) -> bool: if os.path.exists(element.path()): elements.append(element) return True return False for raw_element in raw_elements: expanded_raw_elements = raw_element.expand_glob() if len(expanded_raw_elements) == 0 and required: raise exceptions.InvalidConfiguration( f"Invalid path {raw_element}: does not exist." ) for expanded_raw_element in expanded_raw_elements: if isinstance(expanded_raw_element, SitePackageRawElement): added = False for site_root in site_roots: if added := add_if_exists( expanded_raw_element.to_element(site_root) ): break if not added: if required: raise exceptions.InvalidConfiguration( f"Invalid path {expanded_raw_element.package_name}: does not exist." ) else: LOG.warning( "Site package does not exist: " f"`{expanded_raw_element.package_name}`" ) elif isinstance( expanded_raw_element, (SimpleRawElement, SubdirectoryRawElement) ): element = expanded_raw_element.to_element() added = add_if_exists(element) if not added: if required: raise exceptions.InvalidConfiguration( f"Path does not exist for search path: {element}" ) else: LOG.warning(f"Path does not exist for search path: {element}") else: raise RuntimeError( f"Unhandled raw search path element type: {expanded_raw_element}" ) return elements
SitePackageRawElement
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/orm/util.py
{ "start": 52704, "end": 59952 }
class ____( ORMColumnsClauseRole[_T], SupportsCloneAnnotations, MemoizedHasCacheKey, inspection.Inspectable["Bundle[_T]"], InspectionAttr, ): """A grouping of SQL expressions that are returned by a :class:`.Query` under one namespace. The :class:`.Bundle` essentially allows nesting of the tuple-based results returned by a column-oriented :class:`_query.Query` object. It also is extensible via simple subclassing, where the primary capability to override is that of how the set of expressions should be returned, allowing post-processing as well as custom return types, without involving ORM identity-mapped classes. .. seealso:: :ref:`bundles` :class:`.DictBundle` """ single_entity = False """If True, queries for a single Bundle will be returned as a single entity, rather than an element within a keyed tuple.""" is_clause_element = False is_mapper = False is_aliased_class = False is_bundle = True _propagate_attrs: _PropagateAttrsType = util.immutabledict() proxy_set = util.EMPTY_SET exprs: List[_ColumnsClauseElement] def __init__( self, name: str, *exprs: _ColumnExpressionArgument[Any], **kw: Any ) -> None: r"""Construct a new :class:`.Bundle`. e.g.:: bn = Bundle("mybundle", MyClass.x, MyClass.y) for row in session.query(bn).filter(bn.c.x == 5).filter(bn.c.y == 4): print(row.mybundle.x, row.mybundle.y) :param name: name of the bundle. :param \*exprs: columns or SQL expressions comprising the bundle. :param single_entity=False: if True, rows for this :class:`.Bundle` can be returned as a "single entity" outside of any enclosing tuple in the same manner as a mapped entity. 
""" # noqa: E501 self.name = self._label = name coerced_exprs = [ coercions.expect( roles.ColumnsClauseRole, expr, apply_propagate_attrs=self ) for expr in exprs ] self.exprs = coerced_exprs self.c = self.columns = ColumnCollection( (getattr(col, "key", col._label), col) for col in [e._annotations.get("bundle", e) for e in coerced_exprs] ).as_readonly() self.single_entity = kw.pop("single_entity", self.single_entity) def _gen_cache_key( self, anon_map: anon_map, bindparams: List[BindParameter[Any]] ) -> Tuple[Any, ...]: return (self.__class__, self.name, self.single_entity) + tuple( [expr._gen_cache_key(anon_map, bindparams) for expr in self.exprs] ) @property def mapper(self) -> Optional[Mapper[Any]]: mp: Optional[Mapper[Any]] = self.exprs[0]._annotations.get( "parentmapper", None ) return mp @property def entity(self) -> Optional[_InternalEntityType[Any]]: ie: Optional[_InternalEntityType[Any]] = self.exprs[ 0 ]._annotations.get("parententity", None) return ie @property def entity_namespace( self, ) -> ReadOnlyColumnCollection[str, KeyedColumnElement[Any]]: return self.c columns: ReadOnlyColumnCollection[str, KeyedColumnElement[Any]] """A namespace of SQL expressions referred to by this :class:`.Bundle`. e.g.:: bn = Bundle("mybundle", MyClass.x, MyClass.y) q = sess.query(bn).filter(bn.c.x == 5) Nesting of bundles is also supported:: b1 = Bundle( "b1", Bundle("b2", MyClass.a, MyClass.b), Bundle("b3", MyClass.x, MyClass.y), ) q = sess.query(b1).filter(b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9) .. 
seealso:: :attr:`.Bundle.c` """ # noqa: E501 c: ReadOnlyColumnCollection[str, KeyedColumnElement[Any]] """An alias for :attr:`.Bundle.columns`.""" def _clone(self, **kw): cloned = self.__class__.__new__(self.__class__) cloned.__dict__.update(self.__dict__) return cloned def __clause_element__(self): # ensure existing entity_namespace remains annotations = {"bundle": self, "entity_namespace": self} annotations.update(self._annotations) plugin_subject = self.exprs[0]._propagate_attrs.get( "plugin_subject", self.entity ) return ( expression.ClauseList( _literal_as_text_role=roles.ColumnsClauseRole, group=False, *[e._annotations.get("bundle", e) for e in self.exprs], ) ._annotate(annotations) ._set_propagate_attrs( # the Bundle *must* use the orm plugin no matter what. the # subject can be None but it's much better if it's not. { "compile_state_plugin": "orm", "plugin_subject": plugin_subject, } ) ) @property def clauses(self): return self.__clause_element__().clauses def label(self, name): """Provide a copy of this :class:`.Bundle` passing a new label.""" cloned = self._clone() cloned.name = name return cloned def create_row_processor( self, query: Select[Unpack[TupleAny]], procs: Sequence[Callable[[Row[Unpack[TupleAny]]], Any]], labels: Sequence[str], ) -> Callable[[Row[Unpack[TupleAny]]], Any]: """Produce the "row processing" function for this :class:`.Bundle`. May be overridden by subclasses to provide custom behaviors when results are fetched. The method is passed the statement object and a set of "row processor" functions at query execution time; these processor functions when given a result row will return the individual attribute value, which can then be adapted into any kind of return data structure. 
The example below illustrates replacing the usual :class:`.Row` return structure with a straight Python dictionary:: from sqlalchemy.orm import Bundle class DictBundle(Bundle): def create_row_processor(self, query, procs, labels): "Override create_row_processor to return values as dictionaries" def proc(row): return dict(zip(labels, (proc(row) for proc in procs))) return proc A result from the above :class:`_orm.Bundle` will return dictionary values:: bn = DictBundle("mybundle", MyClass.data1, MyClass.data2) for row in session.execute(select(bn)).where(bn.c.data1 == "d1"): print(row.mybundle["data1"], row.mybundle["data2"]) The above example is available natively using :class:`.DictBundle` .. seealso:: :class:`.DictBundle` """ # noqa: E501 keyed_tuple = result_tuple(labels, [() for l in labels]) def proc(row: Row[Unpack[TupleAny]]) -> Any: return keyed_tuple([proc(row) for proc in procs]) return proc
Bundle
python
getsentry__responses
responses/registries.py
{ "start": 2588, "end": 4141 }
class ____(FirstMatchRegistry): """Registry where `Response` objects are dependent on the insertion order and invocation index. OrderedRegistry applies the rule of first in - first out. Responses should be invoked in the same order in which they were added to the registry. Otherwise, an error is returned. """ def find( self, request: "PreparedRequest" ) -> Tuple[Optional["BaseResponse"], List[str]]: """Find the next registered `Response` and check if it matches the request. Search is performed by taking the first element of the registered responses list and removing this object (popping from the list). Parameters ---------- request : PreparedRequest Request that was caught by the custom adapter. Returns ------- Tuple[Optional["BaseResponse"], List[str]] Matched `Response` object and empty list in case of match. Otherwise, None and a list with reasons for not finding a match. """ if not self.registered: return None, ["No more registered responses"] response = self.registered.pop(0) match_result, reason = response.matches(request) if not match_result: self.reset() self.add(response) reason = ( "Next 'Response' in the order doesn't match " f"due to the following reason: {reason}." ) return None, [reason] return response, []
OrderedRegistry
python
mwaskom__seaborn
seaborn/rcmod.py
{ "start": 13860, "end": 14291 }
class ____(dict): def __enter__(self): rc = mpl.rcParams self._orig = {k: rc[k] for k in self._keys} self._set(self) def __exit__(self, exc_type, exc_value, exc_tb): self._set(self._orig) def __call__(self, func): @functools.wraps(func) def wrapper(*args, **kwargs): with self: return func(*args, **kwargs) return wrapper
_RCAesthetics
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_object_position13.py
{ "start": 315, "end": 914 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("object_position13.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with image(s).""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.set_column(1, 1, None, None, {"hidden": 1}) worksheet.insert_image("E9", self.image_dir + "red.png") workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
allegroai__clearml
examples/frameworks/pytorch/pytorch_abseil.py
{ "start": 1019, "end": 5285 }
class ____(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 20, 5, 1) self.conv2 = nn.Conv2d(20, 50, 5, 1) self.fc1 = nn.Linear(4 * 4 * 50, 500) self.fc2 = nn.Linear(500, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2, 2) x = x.view(-1, 4 * 4 * 50) x = F.relu(self.fc1(x)) x = self.fc2(x) return F.log_softmax(x, dim=1) def train(args, model, device, train_loader, optimizer, epoch): model.train() for batch_idx, (data, target) in enumerate(train_loader): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() if batch_idx % args.log_interval == 0: Logger.current_logger().report_scalar( "train", "loss", iteration=(epoch * len(train_loader) + batch_idx), value=loss.item(), ) print( "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format( epoch, batch_idx * len(data), len(train_loader.dataset), 100.0 * batch_idx / len(train_loader), loss.item(), ) ) def test(args, model, device, test_loader, epoch): model.eval() test_loss = 0 correct = 0 with torch.no_grad(): for data, target in test_loader: data, target = data.to(device), target.to(device) output = model(data) test_loss += F.nll_loss( output, target, reduction="sum" ).item() # sum up batch loss pred = output.argmax( dim=1, keepdim=True ) # get the index of the max log-probability correct += pred.eq(target.view_as(pred)).sum().item() test_loss /= len(test_loader.dataset) Logger.current_logger().report_scalar( "test", "loss", iteration=epoch, value=test_loss ) Logger.current_logger().report_scalar( "test", "accuracy", iteration=epoch, value=(correct / len(test_loader.dataset)) ) print( "Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)".format( test_loss, correct, len(test_loader.dataset), 100.0 * correct / len(test_loader.dataset), ) ) def main(_): # Connecting ClearML with the current process, # 
from here on everything is logged automatically task = Task.init(project_name="examples", task_name="PyTorch MNIST train with abseil") use_cuda = FLAGS.cuda and torch.cuda.is_available() torch.manual_seed(FLAGS.seed) device = torch.device("cuda" if use_cuda else "cpu") kwargs = {"num_workers": 4, "pin_memory": True} if use_cuda else {} train_loader = torch.utils.data.DataLoader( datasets.MNIST( os.path.join("..", "data"), train=True, download=True, transform=transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] ), ), batch_size=FLAGS.batch_size, shuffle=True, **kwargs ) test_loader = torch.utils.data.DataLoader( datasets.MNIST( os.path.join("..", "data"), train=False, transform=transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] ), ), batch_size=FLAGS.test_batch_size, shuffle=True, **kwargs ) model = Net().to(device) optimizer = optim.SGD(model.parameters(), lr=FLAGS.lr, momentum=FLAGS.momentum) for epoch in range(1, FLAGS.epochs + 1): train(FLAGS, model, device, train_loader, optimizer, epoch) test(FLAGS, model, device, test_loader, epoch) if FLAGS.save_model: torch.save(model.state_dict(), os.path.join(gettempdir(), "mnist_cnn_abseil.pt")) if __name__ == "__main__": app.run(main)
Net
python
apache__airflow
providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/operators/custom_object_launcher.py
{ "start": 2940, "end": 3819 }
class ____:
    """Spark kubernetes spec."""

    def __init__(self, **entries):
        # Promote every keyword argument to an instance attribute, then
        # normalize the Kubernetes-related ones into client objects.
        self.__dict__.update(entries)
        self.set_attribute()

    def set_attribute(self):
        """Convert raw spec values into kubernetes client objects in place."""
        # Falsy inputs collapse to empty lists.
        self.env_vars = convert_env_vars(self.env_vars) if self.env_vars else []
        if self.image_pull_secrets:
            self.image_pull_secrets = convert_image_pull_secrets(self.image_pull_secrets)
        else:
            self.image_pull_secrets = []
        if self.config_map_mounts:
            # ConfigMap mounts contribute both a volume and a matching mount.
            extra_volumes, extra_mounts = convert_configmap_to_volume(self.config_map_mounts)
            self.volumes.extend(extra_volumes)
            self.volume_mounts.extend(extra_mounts)
        if self.from_env_config_map:
            for c_name in self.from_env_config_map:
                self.env_from.append(convert_configmap(c_name))
        if self.from_env_secret:
            self.env_from.extend(convert_secret(c) for c in self.from_env_secret)
KubernetesSpec
python
kamyu104__LeetCode-Solutions
Python/count-of-substrings-containing-every-vowel-and-k-consonants-i.py
{ "start": 72, "end": 1405 }
class ____(object):
    def countOfSubstrings(self, word, k):
        """
        Count substrings containing every vowel at least once and exactly k
        consonants.

        :type word: str
        :type k: int
        :rtype: int
        """
        VOWELS = set("aeiou")

        def update(i, d):
            # Add (d=+1) or remove (d=-1) word[i] from the window counters:
            # curr2 tracks the consonant count; cnt1/curr1 track per-vowel
            # counts and the number of distinct vowels present.
            if word[i] not in VOWELS:
                curr2[0] += d
                return
            x = ord(word[i])-ord('a')
            if cnt1[x] == 0:
                curr1[0] += 1
            cnt1[x] += d
            if cnt1[x] == 0:
                curr1[0] -= 1

        result = 0
        # cnt2: vowels already conceded to the prefix [left, mid).
        cnt1, cnt2 = [0]*26, [0]*26
        # curr1: distinct vowels in window; curr2: consonants in window.
        curr1, curr2 = [0], [0]
        mid = left = 0
        for right in xrange(len(word)):
            update(right, +1)
            # Shrink from the left while the window holds more than k consonants.
            while curr2[0] > k:
                update(left, -1)
                if left < mid:
                    assert(word[left] in VOWELS)
                    cnt2[ord(word[left])-ord('a')] -= 1
                left += 1
            mid = max(mid, left)
            if not (curr1[0] == len(VOWELS) and curr2[0] == k):
                continue
            # Advance mid past "spare" vowels: dropping them still leaves at
            # least one copy of that vowel in the remaining window.
            while word[mid] in VOWELS and cnt1[ord(word[mid])-ord('a')]-(cnt2[ord(word[mid])-ord('a')]+1) >= 1:
                cnt2[ord(word[mid])-ord('a')] += 1
                mid += 1
            # Every start in [left, mid] yields a valid substring ending at right.
            result += mid-left+1
        return result
# Time:  O(n)
# Space: O(1)
# two pointers, sliding window, freq table
Solution
python
cherrypy__cherrypy
cherrypy/test/test_iterator.py
{ "start": 1068, "end": 1156 }
class ____(OurIterator):
    """Closable variant of OurIterator.

    NOTE(review): ``decrement`` is inherited from ``OurIterator`` (defined
    outside this chunk); ``close`` presumably lets the test observe that the
    server closed the response iterator — confirm against the base class.
    """

    def close(self):
        self.decrement()
OurClosableIterator
python
plotly__plotly.py
plotly/graph_objs/icicle/_marker.py
{ "start": 233, "end": 21224 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "icicle" _path_str = "icicle.marker" _valid_props = { "autocolorscale", "cauto", "cmax", "cmid", "cmin", "coloraxis", "colorbar", "colors", "colorscale", "colorssrc", "line", "pattern", "reversescale", "showscale", } @property def autocolorscale(self): """ Determines whether the colorscale is a default palette (`autocolorscale: true`) or the palette determined by `marker.colorscale`. Has an effect only if colors is set to a numerical array. In case `colorscale` is unspecified or `autocolorscale` is true, the default palette will be chosen according to whether numbers in the `color` array are all positive, all negative or mixed. The 'autocolorscale' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["autocolorscale"] @autocolorscale.setter def autocolorscale(self, val): self["autocolorscale"] = val @property def cauto(self): """ Determines whether or not the color domain is computed with respect to the input data (here colors) or the bounds set in `marker.cmin` and `marker.cmax` Has an effect only if colors is set to a numerical array. Defaults to `false` when `marker.cmin` and `marker.cmax` are set by the user. The 'cauto' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["cauto"] @cauto.setter def cauto(self, val): self["cauto"] = val @property def cmax(self): """ Sets the upper bound of the color domain. Has an effect only if colors is set to a numerical array. Value should have the same units as colors and if set, `marker.cmin` must be set as well. The 'cmax' property is a number and may be specified as: - An int or float Returns ------- int|float """ return self["cmax"] @cmax.setter def cmax(self, val): self["cmax"] = val @property def cmid(self): """ Sets the mid-point of the color domain by scaling `marker.cmin` and/or `marker.cmax` to be equidistant to this point. 
Has an effect only if colors is set to a numerical array. Value should have the same units as colors. Has no effect when `marker.cauto` is `false`. The 'cmid' property is a number and may be specified as: - An int or float Returns ------- int|float """ return self["cmid"] @cmid.setter def cmid(self, val): self["cmid"] = val @property def cmin(self): """ Sets the lower bound of the color domain. Has an effect only if colors is set to a numerical array. Value should have the same units as colors and if set, `marker.cmax` must be set as well. The 'cmin' property is a number and may be specified as: - An int or float Returns ------- int|float """ return self["cmin"] @cmin.setter def cmin(self, val): self["cmin"] = val @property def coloraxis(self): """ Sets a reference to a shared color axis. References to these shared color axes are "coloraxis", "coloraxis2", "coloraxis3", etc. Settings for these shared color axes are set in the layout, under `layout.coloraxis`, `layout.coloraxis2`, etc. Note that multiple color scales can be linked to the same color axis. The 'coloraxis' property is an identifier of a particular subplot, of type 'coloraxis', that may be specified as the string 'coloraxis' optionally followed by an integer >= 1 (e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.) Returns ------- str """ return self["coloraxis"] @coloraxis.setter def coloraxis(self, val): self["coloraxis"] = val @property def colorbar(self): """ The 'colorbar' property is an instance of ColorBar that may be specified as: - An instance of :class:`plotly.graph_objs.icicle.marker.ColorBar` - A dict of string/value properties that will be passed to the ColorBar constructor Returns ------- plotly.graph_objs.icicle.marker.ColorBar """ return self["colorbar"] @colorbar.setter def colorbar(self, val): self["colorbar"] = val @property def colors(self): """ Sets the color of each sector of this trace. If not specified, the default trace color set is used to pick the sector colors. 
The 'colors' property is an array that may be specified as a tuple, list, numpy array, or pandas Series Returns ------- numpy.ndarray """ return self["colors"] @colors.setter def colors(self, val): self["colors"] = val @property def colorscale(self): """ Sets the colorscale. Has an effect only if colors is set to a numerical array. The colorscale must be an array containing arrays mapping a normalized value to an rgb, rgba, hex, hsl, hsv, or named color string. At minimum, a mapping for the lowest (0) and highest (1) values are required. For example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the bounds of the colorscale in color space, use `marker.cmin` and `marker.cmax`. Alternatively, `colorscale` may be a palette name string of the following list: Blackbody,Bluered,Blues,Civi dis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow ,RdBu,Reds,Viridis,YlGnBu,YlOrRd. The 'colorscale' property is a colorscale and may be specified as: - A list of colors that will be spaced evenly to create the colorscale. Many predefined colorscale lists are included in the sequential, diverging, and cyclical modules in the plotly.colors package. - A list of 2-element lists where the first element is the normalized color level value (starting at 0 and ending at 1), and the second item is a valid color string. (e.g. 
[[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']]) - One of the following named colorscales: ['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance', 'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg', 'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl', 'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric', 'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys', 'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet', 'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges', 'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl', 'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn', 'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu', 'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar', 'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn', 'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid', 'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd']. Appending '_r' to a named colorscale reverses it. Returns ------- str """ return self["colorscale"] @colorscale.setter def colorscale(self, val): self["colorscale"] = val @property def colorssrc(self): """ Sets the source reference on Chart Studio Cloud for `colors`. The 'colorssrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["colorssrc"] @colorssrc.setter def colorssrc(self, val): self["colorssrc"] = val @property def line(self): """ The 'line' property is an instance of Line that may be specified as: - An instance of :class:`plotly.graph_objs.icicle.marker.Line` - A dict of string/value properties that will be passed to the Line constructor Returns ------- plotly.graph_objs.icicle.marker.Line """ return self["line"] @line.setter def line(self, val): self["line"] = val @property def pattern(self): """ Sets the pattern within the marker. 
The 'pattern' property is an instance of Pattern that may be specified as: - An instance of :class:`plotly.graph_objs.icicle.marker.Pattern` - A dict of string/value properties that will be passed to the Pattern constructor Returns ------- plotly.graph_objs.icicle.marker.Pattern """ return self["pattern"] @pattern.setter def pattern(self, val): self["pattern"] = val @property def reversescale(self): """ Reverses the color mapping if true. Has an effect only if colors is set to a numerical array. If true, `marker.cmin` will correspond to the last color in the array and `marker.cmax` will correspond to the first color. The 'reversescale' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["reversescale"] @reversescale.setter def reversescale(self, val): self["reversescale"] = val @property def showscale(self): """ Determines whether or not a colorbar is displayed for this trace. Has an effect only if colors is set to a numerical array. The 'showscale' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["showscale"] @showscale.setter def showscale(self, val): self["showscale"] = val @property def _prop_descriptions(self): return """\ autocolorscale Determines whether the colorscale is a default palette (`autocolorscale: true`) or the palette determined by `marker.colorscale`. Has an effect only if colors is set to a numerical array. In case `colorscale` is unspecified or `autocolorscale` is true, the default palette will be chosen according to whether numbers in the `color` array are all positive, all negative or mixed. cauto Determines whether or not the color domain is computed with respect to the input data (here colors) or the bounds set in `marker.cmin` and `marker.cmax` Has an effect only if colors is set to a numerical array. Defaults to `false` when `marker.cmin` and `marker.cmax` are set by the user. cmax Sets the upper bound of the color domain. 
Has an effect only if colors is set to a numerical array. Value should have the same units as colors and if set, `marker.cmin` must be set as well. cmid Sets the mid-point of the color domain by scaling `marker.cmin` and/or `marker.cmax` to be equidistant to this point. Has an effect only if colors is set to a numerical array. Value should have the same units as colors. Has no effect when `marker.cauto` is `false`. cmin Sets the lower bound of the color domain. Has an effect only if colors is set to a numerical array. Value should have the same units as colors and if set, `marker.cmax` must be set as well. coloraxis Sets a reference to a shared color axis. References to these shared color axes are "coloraxis", "coloraxis2", "coloraxis3", etc. Settings for these shared color axes are set in the layout, under `layout.coloraxis`, `layout.coloraxis2`, etc. Note that multiple color scales can be linked to the same color axis. colorbar :class:`plotly.graph_objects.icicle.marker.ColorBar` instance or dict with compatible properties colors Sets the color of each sector of this trace. If not specified, the default trace color set is used to pick the sector colors. colorscale Sets the colorscale. Has an effect only if colors is set to a numerical array. The colorscale must be an array containing arrays mapping a normalized value to an rgb, rgba, hex, hsl, hsv, or named color string. At minimum, a mapping for the lowest (0) and highest (1) values are required. For example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the bounds of the colorscale in color space, use `marker.cmin` and `marker.cmax`. Alternatively, `colorscale` may be a palette name string of the following list: Blackbody,Bluered,Blues,Cividis,Earth,E lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd Bu,Reds,Viridis,YlGnBu,YlOrRd. colorssrc Sets the source reference on Chart Studio Cloud for `colors`. 
line :class:`plotly.graph_objects.icicle.marker.Line` instance or dict with compatible properties pattern Sets the pattern within the marker. reversescale Reverses the color mapping if true. Has an effect only if colors is set to a numerical array. If true, `marker.cmin` will correspond to the last color in the array and `marker.cmax` will correspond to the first color. showscale Determines whether or not a colorbar is displayed for this trace. Has an effect only if colors is set to a numerical array. """ def __init__( self, arg=None, autocolorscale=None, cauto=None, cmax=None, cmid=None, cmin=None, coloraxis=None, colorbar=None, colors=None, colorscale=None, colorssrc=None, line=None, pattern=None, reversescale=None, showscale=None, **kwargs, ): """ Construct a new Marker object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.icicle.Marker` autocolorscale Determines whether the colorscale is a default palette (`autocolorscale: true`) or the palette determined by `marker.colorscale`. Has an effect only if colors is set to a numerical array. In case `colorscale` is unspecified or `autocolorscale` is true, the default palette will be chosen according to whether numbers in the `color` array are all positive, all negative or mixed. cauto Determines whether or not the color domain is computed with respect to the input data (here colors) or the bounds set in `marker.cmin` and `marker.cmax` Has an effect only if colors is set to a numerical array. Defaults to `false` when `marker.cmin` and `marker.cmax` are set by the user. cmax Sets the upper bound of the color domain. Has an effect only if colors is set to a numerical array. Value should have the same units as colors and if set, `marker.cmin` must be set as well. cmid Sets the mid-point of the color domain by scaling `marker.cmin` and/or `marker.cmax` to be equidistant to this point. Has an effect only if colors is set to a numerical array. 
Value should have the same units as colors. Has no effect when `marker.cauto` is `false`. cmin Sets the lower bound of the color domain. Has an effect only if colors is set to a numerical array. Value should have the same units as colors and if set, `marker.cmax` must be set as well. coloraxis Sets a reference to a shared color axis. References to these shared color axes are "coloraxis", "coloraxis2", "coloraxis3", etc. Settings for these shared color axes are set in the layout, under `layout.coloraxis`, `layout.coloraxis2`, etc. Note that multiple color scales can be linked to the same color axis. colorbar :class:`plotly.graph_objects.icicle.marker.ColorBar` instance or dict with compatible properties colors Sets the color of each sector of this trace. If not specified, the default trace color set is used to pick the sector colors. colorscale Sets the colorscale. Has an effect only if colors is set to a numerical array. The colorscale must be an array containing arrays mapping a normalized value to an rgb, rgba, hex, hsl, hsv, or named color string. At minimum, a mapping for the lowest (0) and highest (1) values are required. For example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the bounds of the colorscale in color space, use `marker.cmin` and `marker.cmax`. Alternatively, `colorscale` may be a palette name string of the following list: Blackbody,Bluered,Blues,Cividis,Earth,E lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd Bu,Reds,Viridis,YlGnBu,YlOrRd. colorssrc Sets the source reference on Chart Studio Cloud for `colors`. line :class:`plotly.graph_objects.icicle.marker.Line` instance or dict with compatible properties pattern Sets the pattern within the marker. reversescale Reverses the color mapping if true. Has an effect only if colors is set to a numerical array. If true, `marker.cmin` will correspond to the last color in the array and `marker.cmax` will correspond to the first color. 
showscale Determines whether or not a colorbar is displayed for this trace. Has an effect only if colors is set to a numerical array. Returns ------- Marker """ super().__init__("marker") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.icicle.Marker constructor must be a dict or an instance of :class:`plotly.graph_objs.icicle.Marker`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("autocolorscale", arg, autocolorscale) self._set_property("cauto", arg, cauto) self._set_property("cmax", arg, cmax) self._set_property("cmid", arg, cmid) self._set_property("cmin", arg, cmin) self._set_property("coloraxis", arg, coloraxis) self._set_property("colorbar", arg, colorbar) self._set_property("colors", arg, colors) self._set_property("colorscale", arg, colorscale) self._set_property("colorssrc", arg, colorssrc) self._set_property("line", arg, line) self._set_property("pattern", arg, pattern) self._set_property("reversescale", arg, reversescale) self._set_property("showscale", arg, showscale) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Marker
python
rapidsai__cudf
python/cudf/cudf/core/udf/nrt_utils.py
{ "start": 306, "end": 1247 }
class ____:
    """
    Context manager for determining if NRT is needed.

    Managed types may set use_nrt to be true during instantiation
    to signal that NRT must be enabled during code generation.
    """

    def __init__(self):
        # Flipped to True by code that requires NRT (the Numba Runtime)
        # while this context is active.
        self.use_nrt = False

    def __enter__(self):
        # Publish this capture object via the context variable; keep the
        # reset token so nested/overlapping contexts unwind correctly.
        self._token = _current_nrt_context.set(self)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        _current_nrt_context.reset(self._token)


@contextmanager
def nrt_enabled():
    """
    Context manager for enabling NRT via the numba config.

    CUDA_ENABLE_NRT may be toggled dynamically for a single kernel
    launch, so we use this context to enable it for those that we
    know need it.
    """
    # getattr with a default: the attribute may be absent from numba_config.
    original_value = getattr(numba_config, "CUDA_ENABLE_NRT", False)
    numba_config.CUDA_ENABLE_NRT = True
    try:
        yield
    finally:
        # Always restore the previous setting, even if the body raised.
        numba_config.CUDA_ENABLE_NRT = original_value
CaptureNRTUsage
python
pytorch__pytorch
test/test_serialization.py
{ "start": 41559, "end": 46029 }
class ____(TestCase, SerializationMixin):
    """Serialization tests run against the legacy (non-zip) file format;
    see the run() override at the bottom."""

    # unique_key is necessary because on Python 2.7, if a warning passed to
    # the warning module is the same, it is not raised again.
    def _test_serialization_container(self, unique_key, filecontext_lambda):
        tmpmodule_name = f'tmpmodule{unique_key}'

        def import_module(name, filename):
            # Load a module from an explicit file path and register it in
            # sys.modules so unpickling can resolve its classes.
            import importlib.util
            spec = importlib.util.spec_from_file_location(name, filename)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            sys.modules[module.__name__] = module
            return module

        with filecontext_lambda() as checkpoint:
            fname = get_file_path_2(os.path.dirname(os.path.dirname(torch.__file__)),
                                    'torch', 'testing', '_internal', 'data', 'network1.py')
            module = import_module(tmpmodule_name, fname)
            torch.save(module.Net(), checkpoint)

            # First check that the checkpoint can be loaded without warning about unsafe loads
            checkpoint.seek(0)
            with warnings.catch_warnings(record=True) as w:
                # weights_only=False as this is legacy code that saves the model
                loaded = torch.load(checkpoint, weights_only=False)
                self.assertTrue(isinstance(loaded, module.Net))
                if can_retrieve_source:
                    self.assertEqual(len(w), 0)

            # Replace the module with different source
            fname = get_file_path_2(os.path.dirname(os.path.dirname(torch.__file__)),
                                    'torch', 'testing', '_internal', 'data', 'network2.py')
            module = import_module(tmpmodule_name, fname)
            checkpoint.seek(0)
            with warnings.catch_warnings(record=True) as w:
                # weights_only=False as this is legacy code that saves the model
                loaded = torch.load(checkpoint, weights_only=False)
                self.assertTrue(isinstance(loaded, module.Net))
                if can_retrieve_source:
                    # Loading against changed source must warn exactly once.
                    self.assertEqual(len(w), 1)
                    self.assertEqual(w[0].category, SourceChangeWarning)

    def test_serialization_container(self):
        self._test_serialization_container('file', tempfile.NamedTemporaryFile)

    def test_serialization_container_filelike(self):
        self._test_serialization_container('filelike', BytesIOContext)

    def test_serialization_offset(self):
        # Interleave pickle and torch.save payloads in one file; the large
        # tensor b pushes the file past 2 GiB to exercise 64-bit offsets.
        a = torch.randn(5, 5)
        b = torch.randn(1024, 1024, 512, dtype=torch.float32)
        m = torch.nn.Conv2d(1, 1, (1, 3))
        i, j = 41, 43
        with tempfile.NamedTemporaryFile() as f:
            pickle.dump(i, f)
            torch.save(a, f)
            pickle.dump(j, f)
            torch.save(b, f)
            torch.save(m, f)
            self.assertTrue(f.tell() > 2 * 1024 * 1024 * 1024)
            f.seek(0)
            i_loaded = pickle.load(f)
            a_loaded = torch.load(f)
            j_loaded = pickle.load(f)
            b_loaded = torch.load(f)
            # weights_only=False as this is legacy code that saves the model
            m_loaded = torch.load(f, weights_only=False)
            self.assertTrue(torch.equal(a, a_loaded))
            self.assertTrue(torch.equal(b, b_loaded))
            self.assertTrue(m.kernel_size == m_loaded.kernel_size)
            self.assertEqual(i, i_loaded)
            self.assertEqual(j, j_loaded)

    @parametrize('weights_only', (True, False))
    def test_serialization_offset_filelike(self, weights_only):
        # Same interleaving as above, but through an in-memory file object.
        a = torch.randn(5, 5)
        b = torch.randn(1024, 1024, 512, dtype=torch.float32)
        i, j = 41, 43
        with BytesIOContext() as f:
            pickle.dump(i, f)
            torch.save(a, f)
            pickle.dump(j, f)
            torch.save(b, f)
            self.assertTrue(f.tell() > 2 * 1024 * 1024 * 1024)
            f.seek(0)
            i_loaded = pickle.load(f)
            a_loaded = torch.load(f, weights_only=weights_only)
            j_loaded = pickle.load(f)
            b_loaded = torch.load(f, weights_only=weights_only)
            self.assertTrue(torch.equal(a, a_loaded))
            self.assertTrue(torch.equal(b, b_loaded))
            self.assertEqual(i, i_loaded)
            self.assertEqual(j, j_loaded)

    def run(self, *args, **kwargs):
        # Force the legacy serialization format for every test in this class.
        with serialization_method(use_zip=False):
            return super().run(*args, **kwargs)
TestOldSerialization
python
readthedocs__readthedocs.org
readthedocs/doc_builder/base.py
{ "start": 405, "end": 1857 }
class ____:
    """The Base for all Builders. Defines the API for subclasses."""

    # File patterns builders may ignore; subclasses override as needed.
    ignore_patterns = []

    def __init__(self, build_env, python_env):
        self.build_env = build_env
        self.python_env = python_env
        # Convenience aliases pulled off the build environment.
        self.version = build_env.version
        self.project = build_env.project
        self.config = python_env.config if python_env else None
        self.project_path = self.project.checkout_path(self.version.slug)
        self.api_client = self.build_env.api_client

    def get_final_doctype(self):
        """Some builders may have a different doctype at build time."""
        return self.config.doctype

    def show_conf(self):
        """Show the configuration used for this builder."""

    def build(self):
        """Do the actual building of the documentation."""
        raise NotImplementedError

    def _post_build(self):
        """Execute extra steps (e.g. create ZIP, rename PDF, etc) after building if required."""

    def docs_dir(self):
        """Return the first conventional docs directory that exists, else the project root."""
        candidates = (
            os.path.join(self.project_path, name) for name in ("docs", "doc", "Doc", "book")
        )
        return next((path for path in candidates if os.path.exists(path)), self.project_path)

    def run(self, *args, **kwargs):
        """Proxy run to build environment."""
        return self.build_env.run(*args, **kwargs)
BaseBuilder
python
readthedocs__readthedocs.org
readthedocs/proxito/views/hosting.py
{ "start": 2470, "end": 3073 }
class ____(permissions.BasePermission):
    """
    Allow the request only when the user can view the project.

    This only applies to version-less views: when a version is present, the
    check belongs to the IsAuthorizedToViewVersion permission, so this one
    denies outright.
    """

    def has_permission(self, request, view):
        project = view._get_project()
        if view._get_version():
            # Version-scoped requests are out of scope for this check.
            return False
        return Project.objects.public(user=request.user).filter(pk=project.pk).exists()
IsAuthorizedToViewProject
python
gevent__gevent
src/greentest/3.11/test_selectors.py
{ "start": 15137, "end": 17310 }
class ____:
    """Mixin stressing a selector with more FDs than FD_SETSIZE; expects the
    concrete test class to provide SELECTOR and make_socketpair."""

    # see issue #18963 for why it's skipped on older OS X versions
    @support.requires_mac_ver(10, 5)
    @unittest.skipUnless(resource, "Test needs resource module")
    @support.requires_resource('cpu')
    def test_above_fd_setsize(self):
        # A scalable implementation should have no problem with more than
        # FD_SETSIZE file descriptors. Since we don't know the value, we just
        # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling.
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        try:
            resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
            self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
                            (soft, hard))
            NUM_FDS = min(hard, 2**16)
        except (OSError, ValueError):
            # Could not raise the limit; fall back to the current soft limit.
            NUM_FDS = soft

        # guard for already allocated FDs (stdin, stdout...)
        NUM_FDS -= 32
        s = self.SELECTOR()
        self.addCleanup(s.close)
        for i in range(NUM_FDS // 2):
            try:
                rd, wr = self.make_socketpair()
            except OSError:
                # too many FDs, skip - note that we should only catch EMFILE
                # here, but apparently *BSD and Solaris can fail upon connect()
                # or bind() with EADDRNOTAVAIL, so let's be safe
                self.skipTest("FD limit reached")
            try:
                s.register(rd, selectors.EVENT_READ)
                s.register(wr, selectors.EVENT_WRITE)
            except OSError as e:
                if e.errno == errno.ENOSPC:
                    # this can be raised by epoll if we go over
                    # fs.epoll.max_user_watches sysctl
                    self.skipTest("FD limit reached")
                raise

        try:
            fds = s.select()
        except OSError as e:
            if e.errno == errno.EINVAL and sys.platform == 'darwin':
                # unexplainable errors on macOS don't need to fail the test
                self.skipTest("Invalid argument error calling poll()")
            raise
        # Only the writable halves are ready; the readers have no data yet.
        self.assertEqual(NUM_FDS // 2, len(fds))
ScalableSelectorMixIn
python
django__django
django/contrib/gis/forms/widgets.py
{ "start": 3088, "end": 3572 }
class ____(OpenLayersWidget):
    """
    An OpenLayers/OpenStreetMap-based widget.
    """

    base_layer = "osm"
    # Default map center/zoom, overridable per-instance via ``attrs``.
    default_lon = 5
    default_lat = 47
    default_zoom = 12

    def __init__(self, attrs=None):
        if attrs is None:
            attrs = {}
        # Fill in any map defaults the caller did not supply; setdefault
        # keeps caller-provided values (and mutates the caller's dict, as
        # the original implementation did).
        for key, value in (
            ("default_lon", self.default_lon),
            ("default_lat", self.default_lat),
            ("default_zoom", self.default_zoom),
        ):
            attrs.setdefault(key, value)
        super().__init__(attrs=attrs)
OSMWidget
python
Pylons__pyramid
tests/test_config/test_adapters.py
{ "start": 12714, "end": 13035 }
class ____(unittest.TestCase):
    """Tests for ``pyramid.config.adapters.eventonly``."""

    def _callFUT(self, callee):
        # "call function under test" indirection, per Pyramid test style.
        from pyramid.config.adapters import eventonly

        return eventonly(callee)

    def test_defaults(self):
        def acallable(event, a=1, b=2):  # pragma: no cover
            pass

        # A callable whose extra arguments all have defaults still counts
        # as event-only.
        self.assertTrue(self._callFUT(acallable))
Test_eventonly
python
getsentry__sentry
src/sentry/grouping/component.py
{ "start": 8924, "end": 9012 }
class ____(BaseGroupingComponent[str]):
    # Grouping component wrapping a stack frame's filename value; the fixed
    # ``id`` identifies this component type within a grouping variant.
    id: str = "filename"
FilenameGroupingComponent
python
apache__airflow
airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_assets.py
{ "start": 6865, "end": 8873 }
class ____:
    """Shared DB setup/teardown and creation helpers for asset endpoint tests."""

    @pytest.fixture
    def time_freezer(self) -> Generator:
        # Freeze time at DEFAULT_DATE for tests asserting on timestamps.
        freezer = time_machine.travel(DEFAULT_DATE, tick=False)
        freezer.start()

        yield

        freezer.stop()

    @pytest.fixture(autouse=True)
    def setup(self):
        # Begin and end every test with clean asset/run/dag/bundle/log tables.
        clear_db_assets()
        clear_db_runs()
        clear_db_dags()
        clear_db_dag_bundles()
        clear_db_logs()

        yield

        clear_db_assets()
        clear_db_runs()
        clear_db_dags()
        clear_db_dag_bundles()
        clear_db_logs()

    # The methods below are thin session-provided wrappers around the
    # module-level _create_* helpers (defined elsewhere, not visible here).
    @provide_session
    def create_assets(self, session, num: int = 2) -> list[AssetModel]:
        return _create_assets(session=session, num=num)

    @provide_session
    def create_assets_with_watchers(self, session, num: int = 2) -> list[AssetModel]:
        return _create_assets_with_watchers(session=session, num=num)

    @provide_session
    def create_assets_with_sensitive_extra(self, session, num: int = 2):
        _create_assets_with_sensitive_extra(session=session, num=num)

    @provide_session
    def create_provided_asset(self, session, asset: AssetModel):
        _create_provided_asset(session=session, asset=asset)

    @provide_session
    def create_assets_events(self, session, num: int = 2, varying_timestamps: bool = False):
        _create_assets_events(session=session, num=num, varying_timestamps=varying_timestamps)

    @provide_session
    def create_assets_events_with_sensitive_extra(self, session, num: int = 2):
        _create_assets_events_with_sensitive_extra(session=session, num=num)

    @provide_session
    def create_provided_asset_event(self, session, asset_event: AssetEvent):
        _create_provided_asset_event(session=session, asset_event=asset_event)

    @provide_session
    def create_dag_run(self, session, num: int = 2):
        _create_dag_run(num=num, session=session)

    @provide_session
    def create_asset_dag_run(self, session, num: int = 2):
        _create_asset_dag_run(num=num, session=session)
TestAssets
python
allegroai__clearml
clearml/backend_api/services/v2_23/tasks.py
{ "start": 259654, "end": 299652 }
class ____(Request): """ Edit task's details. :param task: ID of the task :type task: str :param force: If not true, call fails if the task status is not 'created' :type force: bool :param name: Task name Unique within the company. :type name: str :param tags: User-defined tags list :type tags: Sequence[str] :param system_tags: System tags list. This field is reserved for system use, please don't use it. :type system_tags: Sequence[str] :param type: Type of task :type type: TaskTypeEnum :param comment: Free text comment :type comment: str :param parent: Parent task id Must be a completed task. :type parent: str :param project: Project ID of the project to which this task is assigned Must exist[ab] :type project: str :param input: Task input params. (input view must be provided). :type input: Input :param output_dest: Output storage id Must be a reference to an existing storage. :type output_dest: str :param execution: Task execution params :type execution: Execution :param script: Script info :type script: Script :param hyperparams: Task hyper params per section :type hyperparams: dict :param configuration: Task configuration params :type configuration: dict :param models: Task models :type models: TaskModels :param container: Docker container parameters :type container: dict :param runtime: Task runtime mapping :type runtime: dict """ _service = "tasks" _action = "edit" _version = "2.23" _schema = { "definitions": { "artifact": { "properties": { "content_size": { "description": "Raw data length in bytes", "type": "integer", }, "display_data": { "description": "User-defined list of key/value pairs, sorted", "items": {"items": {"type": "string"}, "type": "array"}, "type": "array", }, "hash": { "description": "Hash of entire raw data", "type": "string", }, "key": {"description": "Entry key", "type": "string"}, "mode": { "$ref": "#/definitions/artifact_mode_enum", "description": "System defined input/output indication", }, "timestamp": { "description": "Epoch time 
when artifact was created", "type": "integer", }, "type": { "description": "System defined type", "type": "string", }, "type_data": { "$ref": "#/definitions/artifact_type_data", "description": "Additional fields defined by the system", }, "uri": {"description": "Raw data location", "type": "string"}, }, "required": ["key", "type"], "type": "object", }, "artifact_mode_enum": { "default": "output", "enum": ["input", "output"], "type": "string", }, "artifact_type_data": { "properties": { "content_type": { "description": "System defined raw data content type", "type": ["string", "null"], }, "data_hash": { "description": "Hash of raw data, without any headers or descriptive parts", "type": ["string", "null"], }, "preview": { "description": "Description or textual data", "type": ["string", "null"], }, }, "type": "object", }, "augmentation": { "properties": { "crop_around_rois": { "description": "Crop image data around all frame ROIs", "type": ["boolean", "null"], }, "sets": { "description": "List of augmentation sets", "items": {"$ref": "#/definitions/augmentation_set"}, "type": ["array", "null"], }, }, "type": "object", }, "augmentation_set": { "properties": { "arguments": { "additionalProperties": { "additionalProperties": True, "type": "object", }, "description": "Arguments dictionary per custom augmentation type.", "type": ["object", "null"], }, "cls": { "description": "Augmentation class", "type": ["string", "null"], }, "strength": { "description": "Augmentation strength. Range [0,).", "minimum": 0, "type": ["number", "null"], }, "types": { "description": "Augmentation type", "items": {"type": "string"}, "type": ["array", "null"], }, }, "type": "object", }, "configuration_item": { "properties": { "description": { "description": "The parameter description. Optional", "type": ["string", "null"], }, "name": { "description": "Name of the parameter. Should be unique", "type": ["string", "null"], }, "type": { "description": "Type of the parameter. 
Optional", "type": ["string", "null"], }, "value": { "description": "Value of the parameter", "type": ["string", "null"], }, }, "type": "object", }, "execution": { "properties": { "artifacts": { "description": "Task artifacts", "items": {"$ref": "#/definitions/artifact"}, "type": ["array", "null"], }, "dataviews": { "description": "Additional dataviews for the task", "items": {"additionalProperties": True, "type": "object"}, "type": ["array", "null"], }, "docker_cmd": { "description": "Command for running docker script for the execution of the task", "type": ["string", "null"], }, "framework": { "description": ( "Framework related to the task. Case insensitive. Mandatory for Training tasks. " ), "type": ["string", "null"], }, "model": { "description": "Execution input model ID Not applicable for Register (Import) tasks", "type": ["string", "null"], }, "model_desc": { "additionalProperties": True, "description": "Json object representing the Model descriptors", "type": ["object", "null"], }, "model_labels": { "additionalProperties": {"type": "integer"}, "description": ( "Json object representing the ids of the labels in the model.\n The keys are the" " layers' names and the values are the IDs.\n Not applicable for Register" " (Import) tasks.\n Mandatory for Training tasks" ), "type": ["object", "null"], }, "parameters": { "additionalProperties": True, "description": "Json object containing the Task parameters", "type": ["object", "null"], }, "queue": { "description": "Queue ID where task was queued.", "type": ["string", "null"], }, "test_split": { "description": "Percentage of frames to use for testing only", "type": ["integer", "null"], }, }, "type": "object", }, "filter_by_roi_enum": { "default": "label_rules", "enum": ["disabled", "no_rois", "label_rules"], "type": "string", }, "filter_label_rule": { "properties": { "conf_range": { "description": ( "Range of ROI confidence level in the frame (min, max). 
-1 for not applicable\n " " Both min and max can be either -1 or positive.\n 2nd number (max) must be" " either -1 or larger than or equal to the 1st number (min)" ), "items": {"type": "number"}, "maxItems": 2, "minItems": 1, "type": "array", }, "count_range": { "description": ( "Range of times ROI appears in the frame (min, max). -1 for not applicable.\n " " Both integers must be larger than or equal to -1.\n 2nd integer (max) must be" " either -1 or larger than or equal to the 1st integer (min)" ), "items": {"type": "integer"}, "maxItems": 2, "minItems": 1, "type": "array", }, "label": { "description": ( "Lucene format query (see lucene query syntax).\nDefault search field is label.keyword and" " default operator is AND, so searching for:\n\n'Bus Stop' Blue\n\nis equivalent" " to:\n\nLabel.keyword:'Bus Stop' AND label.keyword:'Blue'" ), "type": "string", }, "must_not": { "default": False, "description": ( "If set then the label must not exist or lucene query must not be true.\n The" " default value is false" ), "type": "boolean", }, }, "required": ["label"], "type": "object", }, "filter_rule": { "properties": { "dataset": { "description": ( "Dataset ID. Must be a dataset which is in the task's view. If set to '*' all datasets in" " View are used." ), "type": "string", }, "filter_by_roi": { "description": "Type of filter. Optional, the default value is 'label_rules'", "oneOf": [ {"$ref": "#/definitions/filter_by_roi_enum"}, {"type": "null"}, ], }, "frame_query": { "description": "Frame filter, in Lucene query syntax", "type": ["string", "null"], }, "label_rules": { "description": ( "List of FilterLabelRule ('AND' connection)\n\ndisabled - No filtering by ROIs. 
Select all" " frames, even if they don't have ROIs (all frames)\n\nno_rois - Select only frames without" " ROIs (empty frames)\n\nlabel_rules - Select frames according to label rules" ), "items": {"$ref": "#/definitions/filter_label_rule"}, "type": ["array", "null"], }, "sources_query": { "description": "Sources filter, in Lucene query syntax. Filters sources in each frame.", "type": ["string", "null"], }, "version": { "description": ( "Dataset version to apply rule to. Must belong to the dataset and be in the task's view. If" " set to '*' all version of the datasets in View are used." ), "type": "string", }, "weight": { "description": "Rule weight. Default is 1", "type": "number", }, }, "required": ["dataset"], "type": "object", }, "filtering": { "properties": { "filtering_rules": { "description": "List of FilterRule ('OR' connection)", "items": {"$ref": "#/definitions/filter_rule"}, "type": ["array", "null"], }, "output_rois": { "description": ( "'all_in_frame' - all rois for a frame are returned\n\n'only_filtered' - only rois which" " led this frame to be selected\n\n'frame_per_roi' - single roi per frame. Frame can be" " returned multiple times with a different roi each time.\n\nNote: this should be used for" " Training tasks only\n\nNote: frame_per_roi implies that only filtered rois will be" " returned\n " ), "oneOf": [ {"$ref": "#/definitions/output_rois_enum"}, {"type": "null"}, ], }, }, "type": "object", }, "input": { "properties": { "augmentation": { "description": "Augmentation parameters. Only for training and testing tasks.", "oneOf": [ {"$ref": "#/definitions/augmentation"}, {"type": "null"}, ], }, "dataviews": { "additionalProperties": {"type": "string"}, "description": "Key to DataView ID Mapping", "type": ["object", "null"], }, "frames_filter": { "description": "Filtering params", "oneOf": [ {"$ref": "#/definitions/filtering"}, {"type": "null"}, ], }, "iteration": { "description": "Iteration parameters. 
Not applicable for register (import) tasks.", "oneOf": [ {"$ref": "#/definitions/iteration"}, {"type": "null"}, ], }, "mapping": { "description": "Mapping params (see common definitions section)", "oneOf": [{"$ref": "#/definitions/mapping"}, {"type": "null"}], }, "view": { "description": "View params", "oneOf": [{"$ref": "#/definitions/view"}, {"type": "null"}], }, }, "type": "object", }, "iteration": { "description": "Sequential Iteration API configuration", "properties": { "infinite": { "description": "Infinite iteration", "type": ["boolean", "null"], }, "jump": { "description": "Jump entry", "oneOf": [{"$ref": "#/definitions/jump"}, {"type": "null"}], }, "limit": { "description": ( "Maximum frames per task. If not passed, frames will end when no more matching frames are" " found, unless infinite is True." ), "type": ["integer", "null"], }, "min_sequence": { "description": ( "Length (in ms) of video clips to return. This is used in random order, and in sequential" " order only if jumping is provided and only for video frames" ), "type": ["integer", "null"], }, "order": { "description": ( "\n Input frames order. Values: 'sequential', 'random'\n In" " Sequential mode frames will be returned according to the order in which the frames were" " added to the dataset." ), "type": ["string", "null"], }, "random_seed": { "description": "Random seed used during iteration", "type": "integer", }, }, "required": ["random_seed"], "type": "object", }, "jump": { "properties": { "time": { "description": "Max time in milliseconds between frames", "type": ["integer", "null"], } }, "type": "object", }, "label_source": { "properties": { "dataset": { "description": "Source dataset id. '*' for all datasets in view", "type": ["string", "null"], }, "labels": { "description": ( "List of source labels (AND connection). '*' indicates any label. 
Labels must exist in at" " least one of the dataset versions in the task's view" ), "items": {"type": "string"}, "type": ["array", "null"], }, "version": { "description": ( "Source dataset version id. Default is '*' (for all versions in dataset in the view)" " Version must belong to the selected dataset, and must be in the task's view[i]" ), "type": ["string", "null"], }, }, "type": "object", }, "mapping": { "properties": { "rules": { "description": "Rules list", "items": {"$ref": "#/definitions/mapping_rule"}, "type": ["array", "null"], } }, "type": "object", }, "mapping_rule": { "properties": { "source": { "description": "Source label info", "oneOf": [ {"$ref": "#/definitions/label_source"}, {"type": "null"}, ], }, "target": { "description": "Target label name", "type": ["string", "null"], }, }, "type": "object", }, "output_rois_enum": { "enum": ["all_in_frame", "only_filtered", "frame_per_roi"], "type": "string", }, "params_item": { "properties": { "description": { "description": "The parameter description. Optional", "type": ["string", "null"], }, "name": { "description": "Name of the parameter. The combination of section and name should be unique", "type": ["string", "null"], }, "section": { "description": "Section that the parameter belongs to", "type": ["string", "null"], }, "type": { "description": "Type of the parameter. Optional", "type": ["string", "null"], }, "value": { "description": "Value of the parameter", "type": ["string", "null"], }, }, "type": "object", }, "script": { "properties": { "binary": { "default": "python", "description": "Binary to use when running the script", "type": ["string", "null"], }, "branch": { "description": ( "Repository branch id If not provided and tag not provided, default repository branch " "is used." 
), "type": ["string", "null"], }, "diff": { "description": "Uncommitted changes found in the repository when task was run", "type": ["string", "null"], }, "entry_point": { "description": "Path to execute within the repository", "type": ["string", "null"], }, "repository": { "description": "Name of the repository where the script is located", "type": ["string", "null"], }, "requirements": { "description": "A JSON object containing requirements strings by key", "type": ["object", "null"], }, "tag": { "description": "Repository tag", "type": ["string", "null"], }, "version_num": { "description": ( "Version (changeset) number. Optional (default is head version) Unused if tag is provided." ), "type": ["string", "null"], }, "working_dir": { "description": ( "Path to the folder from which to run the script Default - root folder of repository" ), "type": ["string", "null"], }, }, "type": "object", }, "section_params": { "additionalProperties": {"$ref": "#/definitions/params_item"}, "description": "Task section params", "type": "object", }, "task_model_item": { "properties": { "model": {"description": "The model ID", "type": "string"}, "name": { "description": "The task model name", "type": "string", }, }, "required": ["name", "model"], "type": "object", }, "task_models": { "properties": { "input": { "description": "The list of task input models", "items": {"$ref": "#/definitions/task_model_item"}, "type": ["array", "null"], }, "output": { "description": "The list of task output models", "items": {"$ref": "#/definitions/task_model_item"}, "type": ["array", "null"], }, }, "type": "object", }, "task_type_enum": { "enum": [ "dataset_import", "annotation", "annotation_manual", "training", "testing", "inference", "data_processing", "application", "monitor", "controller", "optimizer", "service", "qc", "custom", ], "type": "string", }, "view": { "properties": { "entries": { "description": "List of view entries. 
All tasks must have at least one view.", "items": {"$ref": "#/definitions/view_entry"}, "type": ["array", "null"], } }, "type": "object", }, "view_entry": { "properties": { "dataset": { "description": "Existing Dataset id", "type": ["string", "null"], }, "merge_with": { "description": "Version ID to merge with", "type": ["string", "null"], }, "version": { "description": "Version id of a version belonging to the dataset", "type": ["string", "null"], }, }, "type": "object", }, }, "properties": { "comment": {"description": "Free text comment ", "type": "string"}, "configuration": { "additionalProperties": {"$ref": "#/definitions/configuration_item"}, "description": "Task configuration params", "type": "object", }, "container": { "additionalProperties": {"type": ["string", "null"]}, "description": "Docker container parameters", "type": "object", }, "execution": { "$ref": "#/definitions/execution", "description": "Task execution params", }, "force": { "default": False, "description": "If not true, call fails if the task status is not 'created'", "type": "boolean", }, "hyperparams": { "additionalProperties": {"$ref": "#/definitions/section_params"}, "description": "Task hyper params per section", "type": "object", }, "input": { "$ref": "#/definitions/input", "description": "Task input params. 
(input view must be provided).", }, "models": { "$ref": "#/definitions/task_models", "description": "Task models", }, "name": { "description": "Task name Unique within the company.", "type": "string", }, "output_dest": { "description": "Output storage id Must be a reference to an existing storage.", "type": "string", }, "parent": { "description": "Parent task id Must be a completed task.", "type": "string", }, "project": { "description": "Project ID of the project to which this task is assigned Must exist[ab]", "type": "string", }, "runtime": { "description": "Task runtime mapping", "type": "object", "additionalProperties": True, }, "script": {"$ref": "#/definitions/script", "description": "Script info"}, "system_tags": { "description": "System tags list. This field is reserved for system use, please don't use it.", "items": {"type": "string"}, "type": "array", }, "tags": { "description": "User-defined tags list", "items": {"type": "string"}, "type": "array", }, "task": {"description": "ID of the task", "type": "string"}, "type": { "$ref": "#/definitions/task_type_enum", "description": "Type of task", }, }, "required": ["task"], "type": "object", } def __init__( self, task, force=False, name=None, tags=None, system_tags=None, type=None, comment=None, parent=None, project=None, input=None, output_dest=None, execution=None, script=None, hyperparams=None, configuration=None, models=None, container=None, runtime=None, **kwargs ): super(EditRequest, self).__init__(**kwargs) self.task = task self.force = force self.name = name self.tags = tags self.system_tags = system_tags self.type = type self.comment = comment self.parent = parent self.project = project self.input = input self.output_dest = output_dest self.execution = execution self.script = script self.hyperparams = hyperparams self.configuration = configuration self.models = models self.container = container self.runtime = runtime @schema_property("task") def task(self): return self._property_task @task.setter def 
task(self, value): if value is None: self._property_task = None return self.assert_isinstance(value, "task", six.string_types) self._property_task = value @schema_property("force") def force(self): return self._property_force @force.setter def force(self, value): if value is None: self._property_force = None return self.assert_isinstance(value, "force", (bool,)) self._property_force = value @schema_property("name") def name(self): return self._property_name @name.setter def name(self, value): if value is None: self._property_name = None return self.assert_isinstance(value, "name", six.string_types) self._property_name = value @schema_property("tags") def tags(self): return self._property_tags @tags.setter def tags(self, value): if value is None: self._property_tags = None return self.assert_isinstance(value, "tags", (list, tuple)) self.assert_isinstance(value, "tags", six.string_types, is_array=True) self._property_tags = value @schema_property("system_tags") def system_tags(self): return self._property_system_tags @system_tags.setter def system_tags(self, value): if value is None: self._property_system_tags = None return self.assert_isinstance(value, "system_tags", (list, tuple)) self.assert_isinstance(value, "system_tags", six.string_types, is_array=True) self._property_system_tags = value @schema_property("type") def type(self): return self._property_type @type.setter def type(self, value): if value is None: self._property_type = None return if isinstance(value, six.string_types): try: value = TaskTypeEnum(value) except ValueError: pass else: self.assert_isinstance(value, "type", enum.Enum) self._property_type = value @schema_property("comment") def comment(self): return self._property_comment @comment.setter def comment(self, value): if value is None: self._property_comment = None return self.assert_isinstance(value, "comment", six.string_types) self._property_comment = value @schema_property("parent") def parent(self): return self._property_parent 
@parent.setter def parent(self, value): if value is None: self._property_parent = None return self.assert_isinstance(value, "parent", six.string_types) self._property_parent = value @schema_property("project") def project(self): return self._property_project @project.setter def project(self, value): if value is None: self._property_project = None return self.assert_isinstance(value, "project", six.string_types) self._property_project = value @schema_property("input") def input(self): return self._property_input @input.setter def input(self, value): if value is None: self._property_input = None return if isinstance(value, dict): value = Input.from_dict(value) else: self.assert_isinstance(value, "input", Input) self._property_input = value @schema_property("output_dest") def output_dest(self): return self._property_output_dest @output_dest.setter def output_dest(self, value): if value is None: self._property_output_dest = None return self.assert_isinstance(value, "output_dest", six.string_types) self._property_output_dest = value @schema_property("execution") def execution(self): return self._property_execution @execution.setter def execution(self, value): if value is None: self._property_execution = None return if isinstance(value, dict): value = Execution.from_dict(value) else: self.assert_isinstance(value, "execution", Execution) self._property_execution = value @schema_property("script") def script(self): return self._property_script @script.setter def script(self, value): if value is None: self._property_script = None return if isinstance(value, dict): value = Script.from_dict(value) else: self.assert_isinstance(value, "script", Script) self._property_script = value @schema_property("hyperparams") def hyperparams(self): return self._property_hyperparams @hyperparams.setter def hyperparams(self, value): if value is None: self._property_hyperparams = None return self.assert_isinstance(value, "hyperparams", dict) self.assert_isinstance( value.keys(), 
"hyperparams_keys", six.string_types, is_array=True ) self.assert_isinstance( value.values(), "hyperparams_values", (SectionParams, dict), is_array=True ) value = dict( (k, SectionParams(**v) if isinstance(v, dict) else v) for k, v in value.items() ) self._property_hyperparams = value @schema_property("configuration") def configuration(self): return self._property_configuration @configuration.setter def configuration(self, value): if value is None: self._property_configuration = None return self.assert_isinstance(value, "configuration", dict) self.assert_isinstance( value.keys(), "configuration_keys", six.string_types, is_array=True ) self.assert_isinstance( value.values(), "configuration_values", (ConfigurationItem, dict), is_array=True, ) value = dict( (k, ConfigurationItem(**v) if isinstance(v, dict) else v) for k, v in value.items() ) self._property_configuration = value @schema_property("models") def models(self): return self._property_models @models.setter def models(self, value): if value is None: self._property_models = None return if isinstance(value, dict): value = TaskModels.from_dict(value) else: self.assert_isinstance(value, "models", TaskModels) self._property_models = value @schema_property("container") def container(self): return self._property_container @container.setter def container(self, value): if value is None: self._property_container = None return self.assert_isinstance(value, "container", (dict,)) self._property_container = value @schema_property("runtime") def runtime(self): return self._property_runtime @runtime.setter def runtime(self, value): if value is None: self._property_runtime = None return self.assert_isinstance(value, "runtime", (dict,)) self._property_runtime = value
EditRequest
python
weaviate__weaviate-python-client
weaviate/collections/classes/config.py
{ "start": 68442, "end": 71446 }
class ____(_ConfigCreateModel): """This class defines the structure of a data property that a collection can have within Weaviate. Attributes: name: The name of the property, REQUIRED. data_type: The data type of the property, REQUIRED. description: A description of the property. index_filterable: Whether the property should be filterable in the inverted index. index_range_filters: Whether the property should support range filters in the inverted index. index_searchable: Whether the property should be searchable in the inverted index. nested_properties: nested properties for data type OBJECT and OBJECT_ARRAY`. skip_vectorization: Whether to skip vectorization of the property. Defaults to `False`. tokenization: The tokenization method to use for the inverted index. Defaults to `None`. vectorize_property_name: Whether to vectorize the property name. Defaults to `True`. """ name: str dataType: DataType = Field(default=..., alias="data_type") description: Optional[str] = Field(default=None) indexFilterable: Optional[bool] = Field(default=None, alias="index_filterable") indexSearchable: Optional[bool] = Field(default=None, alias="index_searchable") indexRangeFilters: Optional[bool] = Field(default=None, alias="index_range_filters") nestedProperties: Optional[Union["Property", List["Property"]]] = Field( default=None, alias="nested_properties" ) skip_vectorization: bool = Field(default=False) tokenization: Optional[Tokenization] = Field(default=None) vectorize_property_name: bool = Field(default=True) @field_validator("name") def _check_name(cls, v: str) -> str: if v in ["id", "vector"]: raise ValueError(f"Property name '{v}' is reserved and cannot be used") return v def _to_dict( self, vectorizers: Optional[Sequence[Union[Vectorizers, _EnumLikeStr]]] = None ) -> Dict[str, Any]: ret_dict = super()._to_dict() ret_dict["dataType"] = [ret_dict["dataType"]] if vectorizers is not None: for vectorizer in vectorizers: if vectorizer is not None and vectorizer != 
Vectorizers.NONE: if "moduleConfig" not in ret_dict: ret_dict["moduleConfig"] = {} ret_dict["moduleConfig"][vectorizer.value] = { "skip": self.skip_vectorization, "vectorizePropertyName": self.vectorize_property_name, } del ret_dict["skip_vectorization"] del ret_dict["vectorize_property_name"] if self.nestedProperties is not None: ret_dict["nestedProperties"] = ( [prop._to_dict() for prop in self.nestedProperties] if isinstance(self.nestedProperties, list) else [self.nestedProperties._to_dict()] ) return ret_dict
Property
python
pytorch__pytorch
test/dynamo/cpython/3_13/test_set.py
{ "start": 65901, "end": 67196 }
class ____: """Regression test for bpo-46615""" constructor1 = None constructor2 = None def make_sets_of_bad_objects(self): with torch._dynamo.error_on_graph_break(False): class Bad: def __eq__(self, other): if not enabled: return False if randrange(20) == 0: set1.clear() if randrange(20) == 0: set2.clear() return bool(randrange(2)) def __hash__(self): return randrange(2) # Don't behave poorly during construction. enabled = False set1 = self.constructor1(Bad() for _ in range(randrange(50))) set2 = self.constructor2(Bad() for _ in range(randrange(50))) # Now start behaving poorly enabled = True return set1, set2 def check_set_op_does_not_crash(self, function): for _ in range(100): set1, set2 = self.make_sets_of_bad_objects() try: function(set1, set2) except RuntimeError as e: # Just make sure we don't crash here. self.assertIn("changed size during iteration", str(e))
_TestOperationsMutating
python
pytorch__pytorch
torch/distributed/elastic/metrics/api.py
{ "start": 1388, "end": 5751 }
class ____: def __init__(self, group_name: str, handler: MetricHandler): self.group_name = group_name self.handler = handler def add_value(self, metric_name: str, metric_value: int): self.handler.emit( MetricData(time.time(), self.group_name, metric_name, metric_value) ) _metrics_map: dict[str, MetricHandler] = {} _default_metrics_handler: MetricHandler = NullMetricHandler() # pyre-fixme[9]: group has type `str`; used as `None`. def configure(handler: MetricHandler, group: str | None = None): if group is None: global _default_metrics_handler # pyre-fixme[9]: _default_metrics_handler has type `NullMetricHandler`; used # as `MetricHandler`. _default_metrics_handler = handler else: _metrics_map[group] = handler def getStream(group: str): handler = _metrics_map.get(group, _default_metrics_handler) return MetricStream(group, handler) def _get_metric_name(fn): qualname = fn.__qualname__ split = qualname.split(".") if len(split) == 1: module = fn.__module__ if module: return module.split(".")[-1] + "." + split[0] else: return split[0] else: return qualname def prof(fn=None, group: str = "torchelastic"): r""" @profile decorator publishes duration.ms, count, success, failure metrics for the function that it decorates. The metric name defaults to the qualified name (``class_name.def_name``) of the function. If the function does not belong to a class, it uses the leaf module name instead. 
Usage :: @metrics.prof def x(): pass @metrics.prof(group="agent") def y(): pass """ def wrap(f): @wraps(f) def wrapper(*args, **kwargs): key = _get_metric_name(f) try: start = time.time() result = f(*args, **kwargs) put_metric(f"{key}.success", 1, group) except Exception: put_metric(f"{key}.failure", 1, group) raise finally: put_metric(f"{key}.duration.ms", get_elapsed_time_ms(start), group) # type: ignore[possibly-undefined] return result return wrapper if fn: return wrap(fn) else: return wrap @deprecated("Deprecated, use `@prof` instead", category=FutureWarning) def profile(group=None): """ @profile decorator adds latency and success/failure metrics to any given function. Usage :: @metrics.profile("my_metric_group") def some_function(<arguments>): """ def wrap(func): @wraps(func) def wrapper(*args, **kwargs): try: start_time = time.time() result = func(*args, **kwargs) # pyrefly: ignore [bad-argument-type] publish_metric(group, f"{func.__name__}.success", 1) except Exception: # pyrefly: ignore [bad-argument-type] publish_metric(group, f"{func.__name__}.failure", 1) raise finally: publish_metric( # pyrefly: ignore [bad-argument-type] group, f"{func.__name__}.duration.ms", get_elapsed_time_ms(start_time), # type: ignore[possibly-undefined] ) return result return wrapper return wrap def put_metric(metric_name: str, metric_value: int, metric_group: str = "torchelastic"): """ Publish a metric data point. 
Usage :: put_metric("metric_name", 1) put_metric("metric_name", 1, "metric_group_name") """ getStream(metric_group).add_value(metric_name, metric_value) @deprecated( "Deprecated, use `put_metric(metric_group)(metric_name, metric_value)` instead", category=FutureWarning, ) def publish_metric(metric_group: str, metric_name: str, metric_value: int): metric_stream = getStream(metric_group) metric_stream.add_value(metric_name, metric_value) def get_elapsed_time_ms(start_time_in_seconds: float): """Return the elapsed time in millis from the given start time.""" end_time = time.time() return int((end_time - start_time_in_seconds) * 1000)
MetricStream
python
uqfoundation__dill
dill/tests/test_detect.py
{ "start": 1710, "end": 2802 }
class ____: def _method(self): pass def ok(self): return True def test_globals(): def f(): a def g(): b def h(): c assert globalvars(f) == dict(a=1, b=2, c=3) res = globalvars(foo, recurse=True) assert set(res) == set(['squared', 'a']) res = globalvars(foo, recurse=False) assert res == {} zap = foo(2) res = globalvars(zap, recurse=True) assert set(res) == set(['squared', 'a']) res = globalvars(zap, recurse=False) assert set(res) == set(['squared']) del zap res = globalvars(squared) assert set(res) == set(['a']) # FIXME: should find referenced __builtins__ #res = globalvars(_class, recurse=True) #assert set(res) == set(['True']) #res = globalvars(_class, recurse=False) #assert res == {} #res = globalvars(_class.ok, recurse=True) #assert set(res) == set(['True']) #res = globalvars(_class.ok, recurse=False) #assert set(res) == set(['True']) #98 dill ignores __getstate__ in interactive lambdas bar = [0]
_class
python
getsentry__sentry
tests/sentry/search/events/builder/test_metrics.py
{ "start": 119523, "end": 139558 }
class ____(MetricBuilderBaseTest): def test_run_query_with_on_demand_distribution(self) -> None: field = "p75(measurements.fp)" query_s = "transaction.duration:>=100" spec = OnDemandMetricSpec(field=field, query=query_s, spec_type=MetricSpecType.SIMPLE_QUERY) self.store_transaction_metric( value=200, metric=TransactionMetricKey.DIST_ON_DEMAND.value, internal_metric=TransactionMRI.DIST_ON_DEMAND.value, entity="metrics_distributions", tags={"query_hash": spec.query_hash}, timestamp=self.start, ) query = AlertMetricsQueryBuilder( self.params, granularity=3600, time_range_window=3600, query=query_s, dataset=Dataset.PerformanceMetrics, selected_columns=[field], config=QueryBuilderConfig( use_metrics_layer=False, on_demand_metrics_enabled=True, on_demand_metrics_type=MetricSpecType.SIMPLE_QUERY, skip_time_conditions=False, ), ) result = query.run_query("test_query") assert result["data"] == [{"d:transactions/on_demand@none": 200.0}] meta = result["meta"] assert len(meta) == 1 assert meta[0]["name"] == "d:transactions/on_demand@none" def test_run_query_with_on_demand_count_and_environments(self) -> None: field = "count(measurements.fp)" query_s = "transaction.duration:>=100" self.create_environment(project=self.project, name="prod") # We want to test also with "dev" that is not in the database, to check that we fallback to avoiding the # environment filter at all. 
environments = ((None, 100), ("prod", 200), ("dev", 300)) specs = [] for environment, value in environments: spec = OnDemandMetricSpec( field=field, query=query_s, environment=environment, spec_type=MetricSpecType.SIMPLE_QUERY, ) self.store_transaction_metric( value=value, metric=TransactionMetricKey.COUNT_ON_DEMAND.value, internal_metric=TransactionMRI.COUNT_ON_DEMAND.value, entity="metrics_counters", tags={"query_hash": spec.query_hash}, timestamp=self.start, ) specs.append(spec) expected_environments = ((None, 100), ("prod", 200), ("dev", 100)) for (environment, value), spec in zip(expected_environments, specs): params = ( self.params if environment is None else {**self.params, "environment": [environment]} ) query = AlertMetricsQueryBuilder( params, granularity=3600, time_range_window=3600, query=query_s, dataset=Dataset.PerformanceMetrics, selected_columns=[field], config=QueryBuilderConfig( use_metrics_layer=False, on_demand_metrics_enabled=True, on_demand_metrics_type=MetricSpecType.SIMPLE_QUERY, skip_time_conditions=False, ), ) assert query._on_demand_metric_spec_map == { "count(measurements.fp)": OnDemandMetricSpec( field=field, query=query_s, environment=environment, spec_type=MetricSpecType.SIMPLE_QUERY, ) } result = query.run_query("test_query") assert result["data"] == [{"c:transactions/on_demand@none": float(value)}] meta = result["meta"] assert len(meta) == 1 assert meta[0]["name"] == "c:transactions/on_demand@none" def test_run_query_with_on_demand_failure_rate(self) -> None: field = "failure_rate()" query_s = "transaction.duration:>=100" spec = OnDemandMetricSpec(field=field, query=query_s, spec_type=MetricSpecType.SIMPLE_QUERY) self.store_transaction_metric( value=1, metric=TransactionMetricKey.COUNT_ON_DEMAND.value, internal_metric=TransactionMRI.COUNT_ON_DEMAND.value, entity="metrics_counters", tags={"query_hash": spec.query_hash, "failure": "true"}, timestamp=self.start, ) self.store_transaction_metric( value=1, 
metric=TransactionMetricKey.COUNT_ON_DEMAND.value, internal_metric=TransactionMRI.COUNT_ON_DEMAND.value, entity="metrics_counters", tags={"query_hash": spec.query_hash}, timestamp=self.start, ) query = AlertMetricsQueryBuilder( self.params, granularity=3600, time_range_window=3600, query=query_s, dataset=Dataset.PerformanceMetrics, selected_columns=[field], config=QueryBuilderConfig( use_metrics_layer=False, on_demand_metrics_enabled=True, on_demand_metrics_type=MetricSpecType.SIMPLE_QUERY, skip_time_conditions=False, ), ) result = query.run_query("test_query") # (1 failure / 2 total) = 0.5 assert result["data"] == [{"c:transactions/on_demand@none": 0.5}] meta = result["meta"] assert len(meta) == 1 assert meta[0]["name"] == "c:transactions/on_demand@none" def test_run_query_with_on_demand_apdex(self) -> None: field = "apdex(10)" query_s = "transaction.duration:>=100" spec = OnDemandMetricSpec(field=field, query=query_s, spec_type=MetricSpecType.SIMPLE_QUERY) self.store_transaction_metric( value=1, metric=TransactionMetricKey.COUNT_ON_DEMAND.value, internal_metric=TransactionMRI.COUNT_ON_DEMAND.value, entity="metrics_counters", tags={"query_hash": spec.query_hash, "satisfaction": "satisfactory"}, timestamp=self.start, ) self.store_transaction_metric( value=1, metric=TransactionMetricKey.COUNT_ON_DEMAND.value, internal_metric=TransactionMRI.COUNT_ON_DEMAND.value, entity="metrics_counters", tags={"query_hash": spec.query_hash, "satisfaction": "tolerable"}, timestamp=self.start, ) query = AlertMetricsQueryBuilder( self.params, granularity=3600, time_range_window=3600, query=query_s, dataset=Dataset.PerformanceMetrics, selected_columns=[field], config=QueryBuilderConfig( use_metrics_layer=False, on_demand_metrics_enabled=True, on_demand_metrics_type=MetricSpecType.SIMPLE_QUERY, skip_time_conditions=False, ), ) result = query.run_query("test_query") # (1 satisfactory + (1 tolerable / 2)) / (2 total) = 0.75 assert result["data"] == [{"c:transactions/on_demand@none": 
0.75}] meta = result["meta"] assert len(meta) == 1 assert meta[0]["name"] == "c:transactions/on_demand@none" def test_run_query_with_on_demand_count_and_time_range_required_and_not_supplied(self) -> None: params = { "organization_id": self.organization.id, "project_id": self.projects, } query = AlertMetricsQueryBuilder( params, granularity=3600, time_range_window=3600, query="transaction.duration:>=100", dataset=Dataset.PerformanceMetrics, selected_columns=["count(transaction.duration)"], config=QueryBuilderConfig( use_metrics_layer=False, on_demand_metrics_enabled=True, on_demand_metrics_type=MetricSpecType.SIMPLE_QUERY, # We set here the skipping of conditions, since this is true for alert subscriptions, but we want to verify # whether our secondary error barrier works. skip_time_conditions=True, ), ) with pytest.raises(IncompatibleMetricsQuery): query.run_query("test_query") def test_get_snql_query_with_on_demand_distribution_and_time_range_not_required_and_not_supplied( self, ): params = { "organization_id": self.organization.id, "project_id": self.projects, } query = AlertMetricsQueryBuilder( params, granularity=3600, time_range_window=3600, query="transaction.duration:>=100", dataset=Dataset.PerformanceMetrics, selected_columns=["p75(measurements.fp)"], config=QueryBuilderConfig( use_metrics_layer=False, on_demand_metrics_enabled=True, on_demand_metrics_type=MetricSpecType.SIMPLE_QUERY, # We want to test the snql generation when a time range is not supplied, which is the case for alert # subscriptions. 
skip_time_conditions=True, ), ) snql_request = query.get_snql_query() assert snql_request.dataset == "generic_metrics" snql_query = snql_request.query self.assertCountEqual( [ Function( "arrayElement", [ Function( "quantilesIf(0.75)", [ Column("value"), Function( "equals", [ Column("metric_id"), indexer.resolve( UseCaseID.TRANSACTIONS, 1, "d:transactions/on_demand@none", ), ], ), ], ), 1, ], "d:transactions/on_demand@none", ) ], snql_query.select, ) query_hash_index = indexer.resolve(UseCaseID.TRANSACTIONS, 1, QUERY_HASH_KEY) query_hash_clause = Condition( lhs=Column(name=f"tags_raw[{query_hash_index}]"), op=Op.EQ, rhs="62b395db" ) assert query_hash_clause in snql_query.where def test_get_snql_query_with_on_demand_count_and_time_range_required_and_supplied(self) -> None: query = AlertMetricsQueryBuilder( self.params, granularity=3600, time_range_window=3600, query="transaction.duration:>=100", dataset=Dataset.PerformanceMetrics, selected_columns=["count(transaction.duration)"], config=QueryBuilderConfig( use_metrics_layer=False, on_demand_metrics_enabled=True, on_demand_metrics_type=MetricSpecType.SIMPLE_QUERY, # We want to test the snql generation when a time range is supplied. 
skip_time_conditions=False, ), ) snql_request = query.get_snql_query() assert snql_request.dataset == "generic_metrics" snql_query = snql_request.query self.assertCountEqual( [ Function( "sumIf", [ Column("value"), Function( "equals", [ Column("metric_id"), indexer.resolve( UseCaseID.TRANSACTIONS, 1, "c:transactions/on_demand@none", ), ], ), ], "c:transactions/on_demand@none", ) ], snql_query.select, ) query_hash_index = indexer.resolve(UseCaseID.TRANSACTIONS, 1, QUERY_HASH_KEY) start_time_clause = Condition(lhs=Column(name="timestamp"), op=Op.GTE, rhs=self.start) end_time_clause = Condition(lhs=Column(name="timestamp"), op=Op.LT, rhs=self.end) query_hash_clause = Condition( lhs=Column(name=f"tags_raw[{query_hash_index}]"), op=Op.EQ, rhs="88f3eb66" ) assert start_time_clause in snql_query.where assert end_time_clause in snql_query.where assert query_hash_clause in snql_query.where def test_run_query_with_spm_and_time_range_not_required_and_not_supplied(self) -> None: params = { "organization_id": self.organization.id, "project_id": self.projects, } query = AlertMetricsQueryBuilder( params, granularity=60, time_range_window=3600, query="span.module:db", dataset=Dataset.PerformanceMetrics, selected_columns=["spm()"], offset=None, config=QueryBuilderConfig( skip_time_conditions=True, use_metrics_layer=True, insights_metrics_override_metric_layer=True, ), ) snql_request = query.get_snql_query() assert snql_request.dataset == "generic_metrics" snql_query = snql_request.query self.assertCountEqual( [ Function( "divide", [ Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), indexer.resolve( UseCaseID.SPANS, 1, "d:spans/exclusive_time@millisecond", ), ], ), ], ), Function("divide", [3600, 60]), ], "spm", ) ], snql_query.select, ) def test_run_query_with_on_demand_deprecation_flag_enabled(self) -> None: field = "count()" query_s = "" spec = OnDemandMetricSpec(field=field, query=query_s, spec_type=MetricSpecType.SIMPLE_QUERY) 
self.store_transaction_metric( value=1, metric=TransactionMetricKey.COUNT_ON_DEMAND.value, internal_metric=TransactionMRI.COUNT_ON_DEMAND.value, entity="metrics_counters", tags={"query_hash": spec.query_hash}, timestamp=self.start, ) with Feature("organizations:on-demand-gen-metrics-deprecation-query-prefill"): query = AlertMetricsQueryBuilder( self.params, granularity=3600, time_range_window=3600, query=query_s, dataset=Dataset.PerformanceMetrics, selected_columns=[field], config=QueryBuilderConfig( use_metrics_layer=False, on_demand_metrics_enabled=True, on_demand_metrics_type=MetricSpecType.SIMPLE_QUERY, skip_time_conditions=False, ), ) # Verify the SNQL query structure uses on-demand metrics snql_request = query.get_snql_query() assert snql_request.dataset == "generic_metrics" snql_query = snql_request.query self.assertEqual( [ Function( "sumIf", [ Column("value"), Function( "equals", [ Column("metric_id"), indexer.resolve( UseCaseID.TRANSACTIONS, 1, "c:transactions/on_demand@none", ), ], ), ], "c:transactions/on_demand@none", ) ], snql_query.select, ) result = query.run_query("test_query") assert result["data"] == [{"c:transactions/on_demand@none": 1.0}] meta = result["meta"] assert len(meta) == 1 assert meta[0]["name"] == "c:transactions/on_demand@none" def test_run_query_with_on_demand_deprecation_flag_disabled(self) -> None: field = "count()" query_s = "" spec = OnDemandMetricSpec(field=field, query=query_s, spec_type=MetricSpecType.SIMPLE_QUERY) self.store_transaction_metric( value=1, metric=TransactionMetricKey.COUNT_ON_DEMAND.value, internal_metric=TransactionMRI.COUNT_ON_DEMAND.value, entity="metrics_counters", tags={"query_hash": spec.query_hash}, timestamp=self.start, ) self.store_transaction_metric( value=1, timestamp=self.start, ) query = AlertMetricsQueryBuilder( self.params, granularity=3600, time_range_window=3600, query=query_s, dataset=Dataset.PerformanceMetrics, selected_columns=[field], config=QueryBuilderConfig( use_metrics_layer=False, 
on_demand_metrics_enabled=True, on_demand_metrics_type=MetricSpecType.SIMPLE_QUERY, skip_time_conditions=False, ), ) assert not query.use_on_demand assert query._on_demand_metric_spec_map == {} # Verify the SNQL query structure uses standard metrics snql_request = query.get_snql_query() assert snql_request.dataset == "generic_metrics" snql_query = snql_request.query self.assertEqual( [ Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), indexer.resolve( UseCaseID.TRANSACTIONS, self.organization.id, "d:transactions/duration@millisecond", ), ], ), ], "count", ) ], snql_query.select, ) result = query.run_query("test_query") assert result["data"] == [{"count": 1}]
AlertMetricsQueryBuilderTest
python
html5lib__html5lib-python
html5lib/tests/tree_construction.py
{ "start": 2876, "end": 5726 }
class ____(pytest.Item): def __init__(self, name, parent, test, treeClass, namespaceHTMLElements): super(ParserTest, self).__init__(name, parent) self.test = test self.treeClass = treeClass self.namespaceHTMLElements = namespaceHTMLElements def runtest(self): if self.treeClass is None: pytest.skip("Treebuilder not loaded") p = html5parser.HTMLParser(tree=self.treeClass, namespaceHTMLElements=self.namespaceHTMLElements) input = self.test['data'] fragmentContainer = self.test['document-fragment'] expected = convertExpected(self.test['document']) expectedErrors = self.test['errors'].split("\n") if self.test['errors'] else [] scripting = False if 'script-on' in self.test: scripting = True with warnings.catch_warnings(): warnings.simplefilter("error") try: if fragmentContainer: document = p.parseFragment(input, fragmentContainer, scripting=scripting) else: document = p.parse(input, scripting=scripting) except constants.DataLossWarning: pytest.skip("data loss warning") output = convertTreeDump(p.tree.testSerializer(document)) expected = expected if self.namespaceHTMLElements: expected = namespaceExpected(r"\1<html \2>", expected) errorMsg = "\n".join(["\n\nInput:", input, "\nExpected:", expected, "\nReceived:", output]) assert expected == output, errorMsg errStr = [] for (line, col), errorcode, datavars in p.errors: assert isinstance(datavars, dict), "%s, %s" % (errorcode, repr(datavars)) errStr.append("Line: %i Col: %i %s" % (line, col, constants.E[errorcode] % datavars)) errorMsg2 = "\n".join(["\n\nInput:", input, "\nExpected errors (" + str(len(expectedErrors)) + "):\n" + "\n".join(expectedErrors), "\nActual errors (" + str(len(p.errors)) + "):\n" + "\n".join(errStr)]) if False: # we're currently not testing parse errors assert len(p.errors) == len(expectedErrors), errorMsg2 def repr_failure(self, excinfo): traceback = excinfo.traceback ntraceback = traceback.cut(path=__file__) pytest_ver = getattr(pytest, "version_tuple", ()) filter_args = (excinfo,) if pytest_ver >= 
(7, 4, 0) else () excinfo.traceback = ntraceback.filter(*filter_args) return excinfo.getrepr(funcargs=True, showlocals=False, style="short", tbfilter=False)
ParserTest
python
astropy__astropy
astropy/cosmology/_src/tests/funcs/test_funcs.py
{ "start": 2533, "end": 13217 }
class ____: def setup_class(self): self.cosmo = Planck13 def test_broadcast_arguments(self): """Test broadcast of arguments.""" # broadcasting main argument assert allclose( z_at_value(self.cosmo.age, [2, 7] * u.Gyr), [3.1981206134773115, 0.7562044333305182], rtol=1e-6, ) # basic broadcast of secondary arguments assert allclose( z_at_value( self.cosmo.angular_diameter_distance, 1500 * u.Mpc, zmin=[0, 2.5], zmax=[2, 4], ), [0.681277696, 3.7914908], rtol=1e-6, ) # more interesting broadcast assert allclose( z_at_value( self.cosmo.angular_diameter_distance, 1500 * u.Mpc, zmin=[[0, 2.5]], zmax=[2, 4], ), [[0.681277696, 3.7914908]], rtol=1e-6, ) def test_broadcast_bracket(self): """`bracket` has special requirements.""" # start with an easy one assert allclose( z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=None), 3.1981206134773115, rtol=1e-6, ) # now actually have a bracket assert allclose( z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=[0, 4]), 3.1981206134773115, rtol=1e-6, ) # now a bad length with pytest.raises(ValueError, match="sequence"): z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=[0, 4, 4, 5]) # now the wrong dtype : an ndarray, but not an object array with pytest.raises(TypeError, match="dtype"): z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=np.array([0, 4])) # now an object array of brackets bracket = np.array([[0, 4], [0, 3, 4]], dtype=object) assert allclose( z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=bracket), [3.1981206134773115, 3.1981206134773115], rtol=1e-6, ) def test_bad_broadcast(self): """Shapes mismatch as expected""" with pytest.raises(ValueError, match="broadcast"): z_at_value( self.cosmo.angular_diameter_distance, 1500 * u.Mpc, zmin=[0, 2.5, 0.1], zmax=[2, 4], ) def test_scalar_input_to_output(self): """Test scalar input returns a scalar.""" z = z_at_value( self.cosmo.angular_diameter_distance, 1500 * u.Mpc, zmin=0, zmax=2 ) assert isinstance(z, u.Quantity) assert z.dtype == np.float64 assert z.shape == () @pytest.mark.skipif(not 
HAS_SCIPY, reason="test requires scipy") def test_z_at_value_verbose(monkeypatch): cosmo = Planck13 # Test the "verbose" flag. Since this uses "print", need to mod stdout mock_stdout = StringIO() monkeypatch.setattr(sys, "stdout", mock_stdout) resx = z_at_value(cosmo.age, 2 * u.Gyr, verbose=True) assert str(resx.value) in mock_stdout.getvalue() # test "verbose" prints res @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") @pytest.mark.parametrize("method", ["Brent", "Golden", "Bounded"]) def test_z_at_value_bracketed(method): """ Test 2 solutions for angular diameter distance by not constraining zmin, zmax, but setting `bracket` on the appropriate side of the turning point z. Setting zmin / zmax should override `bracket`. """ cosmo = Planck13 if method == "Bounded": with pytest.warns(AstropyUserWarning, match="fval is not bracketed"): z = z_at_value(cosmo.angular_diameter_distance, 1500 * u.Mpc, method=method) if z > 1.6: z = 3.7914908 bracket = (0.9, 1.5) else: z = 0.6812777 bracket = (1.6, 2.0) with ( pytest.warns(UserWarning, match="Option 'bracket' is ignored"), pytest.warns(AstropyUserWarning, match="fval is not bracketed"), ): assert allclose( z_at_value( cosmo.angular_diameter_distance, 1500 * u.Mpc, method=method, bracket=bracket, ), z, rtol=1e-6, ) else: assert allclose( z_at_value( cosmo.angular_diameter_distance, 1500 * u.Mpc, method=method, bracket=(0.3, 1.0), ), 0.6812777, rtol=1e-6, ) assert allclose( z_at_value( cosmo.angular_diameter_distance, 1500 * u.Mpc, method=method, bracket=(2.0, 4.0), ), 3.7914908, rtol=1e-6, ) assert allclose( z_at_value( cosmo.angular_diameter_distance, 1500 * u.Mpc, method=method, bracket=(0.1, 1.5), ), 0.6812777, rtol=1e-6, ) assert allclose( z_at_value( cosmo.angular_diameter_distance, 1500 * u.Mpc, method=method, bracket=(0.1, 1.0, 2.0), ), 0.6812777, rtol=1e-6, ) with pytest.warns(AstropyUserWarning, match=r"fval is not bracketed"): assert allclose( z_at_value( cosmo.angular_diameter_distance, 1500 * 
u.Mpc, method=method, bracket=(0.9, 1.5), ), 0.6812777, rtol=1e-6, ) assert allclose( z_at_value( cosmo.angular_diameter_distance, 1500 * u.Mpc, method=method, bracket=(1.6, 2.0), ), 3.7914908, rtol=1e-6, ) assert allclose( z_at_value( cosmo.angular_diameter_distance, 1500 * u.Mpc, method=method, bracket=(1.6, 2.0), zmax=1.6, ), 0.6812777, rtol=1e-6, ) assert allclose( z_at_value( cosmo.angular_diameter_distance, 1500 * u.Mpc, method=method, bracket=(0.9, 1.5), zmin=1.5, ), 3.7914908, rtol=1e-6, ) if method == "Bounded": ctx_bracket = pytest.warns( UserWarning, match="Option 'bracket' is ignored by method Bounded" ) else: ctx_bracket = nullcontext() with ( pytest.raises(CosmologyError), pytest.warns(AstropyUserWarning, match="fval is not bracketed"), ctx_bracket, ): z_at_value( cosmo.angular_diameter_distance, 1500 * u.Mpc, method=method, bracket=(3.9, 5.0), zmin=4.0, ) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") @pytest.mark.parametrize("method", ["Brent", "Golden", "Bounded"]) def test_z_at_value_unconverged(method): """ Test warnings on non-converged solution when setting `maxfun` to too small iteration number - only 'Bounded' returns status value and specific message. 
""" cosmo = Planck18 ztol = {"Brent": [1e-4, 1e-4], "Golden": [1e-3, 1e-2], "Bounded": [1e-3, 1e-1]} if method == "Bounded": ctx = pytest.warns( AstropyUserWarning, match="Solver returned 1: Maximum number of function calls reached", ) else: ctx = pytest.warns(AstropyUserWarning, match="Solver returned None") with ctx: z0 = z_at_value( cosmo.angular_diameter_distance, 1 * u.Gpc, zmax=2, maxfun=13, method=method ) with ctx: z1 = z_at_value( cosmo.angular_diameter_distance, 1 * u.Gpc, zmin=2, maxfun=13, method=method ) assert allclose(z0, 0.32442, rtol=ztol[method][0]) assert allclose(z1, 8.18551, rtol=ztol[method][1]) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") @pytest.mark.parametrize( "cosmo", [ Planck13, Planck15, Planck18, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, LambdaCDM, FlatLambdaCDM, wpwaCDM, w0wzCDM, wCDM, FlatwCDM, w0waCDM, Flatw0waCDM, ], ) def test_z_at_value_roundtrip(cosmo): """ Calculate values from a known redshift, and then check that z_at_value returns the right answer. """ z = 0.5 # Skip Ok, w, de_density_scale because in the Planck cosmologies # they are redshift independent and hence uninvertable, # *_distance_z1z2 methods take multiple arguments, so require # special handling # clone is not a redshift-dependent method # nu_relative_density is not redshift-dependent in the WMAP cosmologies skip = ( "Ok", "Otot", "angular_diameter_distance_z1z2", "clone", "is_equivalent", "de_density_scale", "w", ) if str(cosmo.name).startswith("WMAP"): skip += ("nu_relative_density",) methods = inspect.getmembers(cosmo, predicate=inspect.ismethod) for name, func in methods: if name.startswith("_") or name in skip: continue fval = func(z) # we need zmax here to pick the right solution for # angular_diameter_distance and related methods. 
# Be slightly more generous with rtol than the default 1e-8 # used in z_at_value got = z_at_value(func, fval, bracket=[0.3, 1.0], ztol=1e-12) assert allclose(got, z, rtol=2e-11), f"Round-trip testing {name} failed" # Test distance functions between two redshifts; only for realizations if isinstance(getattr(cosmo, "name", None), str): z2 = 2.0 func_z1z2 = [ lambda z1: cosmo.comoving_distance(z1, z2), lambda z1: cosmo._comoving_transverse_distance_z1z2(z1, z2), lambda z1: cosmo.angular_diameter_distance_z1z2(z1, z2), ] for func in func_z1z2: fval = func(z) assert allclose(z, z_at_value(func, fval, zmax=1.5, ztol=1e-12), rtol=2e-11)
Test_ZatValue
python
kamyu104__LeetCode-Solutions
Python/populating-next-right-pointers-in-each-node.py
{ "start": 331, "end": 761 }
class ____(object): # @param root, a tree node # @return nothing def connect(self, root): head = root while head: cur = head while cur and cur.left: cur.left.next = cur.right if cur.next: cur.right.next = cur.next.left cur = cur.next head = head.left # Time: O(n) # Space: O(logn) # recusion
Solution
python
kamyu104__LeetCode-Solutions
Python/n-ary-tree-level-order-traversal.py
{ "start": 146, "end": 523 }
class ____(object): def levelOrder(self, root): """ :type root: Node :rtype: List[List[int]] """ if not root: return [] result, q = [], [root] while q: result.append([node.val for node in q]) q = [child for node in q for child in node.children if child] return result
Solution
python
openai__openai-python
src/openai/types/video_delete_response.py
{ "start": 195, "end": 464 }
class ____(BaseModel): id: str """Identifier of the deleted video.""" deleted: bool """Indicates that the video resource was deleted.""" object: Literal["video.deleted"] """The object type that signals the deletion response."""
VideoDeleteResponse
python
astropy__astropy
astropy/visualization/wcsaxes/core.py
{ "start": 1046, "end": 1928 }
class ____(Artist): """This is a dummy artist to enforce the correct z-order of axis ticks, tick labels, and gridlines. FIXME: This is a bit of a hack. ``Axes.draw`` sorts the artists by zorder and then renders them in sequence. For normal Matplotlib axes, the ticks, tick labels, and gridlines are included in this list of artists and hence are automatically drawn in the correct order. However, ``WCSAxes`` disables the native ticks, labels, and gridlines. Instead, ``WCSAxes.draw`` renders ersatz ticks, labels, and gridlines by explicitly calling the functions ``CoordinateHelper._draw_ticks``, ``CoordinateHelper._draw_grid``, etc. This hack would not be necessary if ``WCSAxes`` drew ticks, tick labels, and gridlines in the standary way. """ def draw(self, renderer): self.axes.draw_wcsaxes(renderer)
_WCSAxesArtist
python
Lightning-AI__lightning
src/lightning/pytorch/loggers/csv_logs.py
{ "start": 2156, "end": 5547 }
class ____(Logger, FabricCSVLogger): r"""Log to local file system in yaml and CSV format. Logs are saved to ``os.path.join(save_dir, name, version)``. Example: >>> from lightning.pytorch import Trainer >>> from lightning.pytorch.loggers import CSVLogger >>> logger = CSVLogger("logs", name="my_exp_name") >>> trainer = Trainer(logger=logger) Args: save_dir: Save directory name: Experiment name, optional. Defaults to ``'lightning_logs'``. If name is ``None``, logs (versions) will be stored to the save dir directly. version: Experiment version. If version is not specified the logger inspects the save directory for existing versions, then automatically assigns the next available version. prefix: A string to put at the beginning of metric keys. flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps). """ LOGGER_JOIN_CHAR = "-" def __init__( self, save_dir: _PATH, name: Optional[str] = "lightning_logs", version: Optional[Union[int, str]] = None, prefix: str = "", flush_logs_every_n_steps: int = 100, ): super().__init__( root_dir=save_dir, name=name, version=version, prefix=prefix, flush_logs_every_n_steps=flush_logs_every_n_steps, ) self._save_dir = os.fspath(save_dir) @property @override def root_dir(self) -> str: """Parent directory for all checkpoint subdirectories. If the experiment name parameter is an empty string, no experiment subdirectory is used and the checkpoint will be saved in "save_dir/version" """ return os.path.join(self.save_dir, self.name) @property @override def log_dir(self) -> str: """The log directory for this run. By default, it is named ``'version_${self.version}'`` but it can be overridden by passing a string value for the constructor's version parameter instead of ``None`` or an int. 
""" # create a pseudo standard path version = self.version if isinstance(self.version, str) else f"version_{self.version}" return os.path.join(self.root_dir, version) @property @override def save_dir(self) -> str: """The current directory where logs are saved. Returns: The path to current directory where logs are saved. """ return self._save_dir @override @rank_zero_only def log_hyperparams(self, params: Optional[Union[dict[str, Any], Namespace]] = None) -> None: params = _convert_params(params) self.experiment.log_hparams(params) @property @override @rank_zero_experiment def experiment(self) -> _FabricExperimentWriter: r"""Actual _ExperimentWriter object. To use _ExperimentWriter features in your :class:`~lightning.pytorch.core.LightningModule` do the following. Example:: self.logger.experiment.some_experiment_writer_function() """ if self._experiment is not None: return self._experiment self._fs.makedirs(self.root_dir, exist_ok=True) self._experiment = ExperimentWriter(log_dir=self.log_dir) return self._experiment
CSVLogger
python
tensorflow__tensorflow
tensorflow/python/distribute/coordinator/cluster_coordinator.py
{ "start": 22784, "end": 33977 }
class ____(object): """Handles preemptions of workers and parameter servers. Starts a thread to regularly poll the coordination service (hosted on PS 0) for task states. When a worker's task state reflects an error, it inspects the error. If the error is recoverable (i.e. a preemption), it waits for the worker to recover, then updates the server def. Otherwise, it raises the error to the user. A worker error is detected to be recoverable if it is the result of missing a heartbeat that workers regularly send to the coordination service. The thread also checks for parameter server errors. If these are detected, the thread and coordinator shutdown. To resume training in this case, the whole job must be restarted and resumed from the latest checkpoint. """ def __init__(self, server_def, cluster): self._server_def = server_def self._cluster = cluster self._cluster_update_lock = threading.Lock() self._cluster_due_for_update_or_finish = threading.Event() self._worker_up_cond = threading.Condition(self._cluster_update_lock) self._next_task_state_cond = threading.Condition() self._task_states = None self._error_from_recovery = None self._should_preemption_thread_run = True self._task_state_poller_thread = utils.RepeatedTimer( interval=_POLL_FREQ_IN_SEC, function=self._get_task_states) self._preemption_handler_thread = threading.Thread( target=self._preemption_handler, name="WorkerPreemptionHandler", daemon=True) self._preemption_handler_thread.start() self._num_workers = self._cluster._num_workers self._num_ps = self._cluster._num_ps def stop(self): """Ensure the worker preemption thread is closed.""" self._task_state_poller_thread.stop() self._should_preemption_thread_run = False with self._cluster_update_lock: self._cluster_due_for_update_or_finish.set() # TODO(yuefengz): The preemption handler thread shouldn't be terminated # asynchronously since it touches eager context which is a process-wide # singleton. The problem is in OSS unit tests will time out. 
@contextlib.contextmanager def wait_on_failure(self, on_failure_fn=None, on_transient_failure_fn=None, on_recovery_fn=None, worker_device_name="(unknown)"): """Catches errors during closure execution and handles them. Args: on_failure_fn: an optional function to run if preemption happens. on_transient_failure_fn: an optional function to run if transient failure happens. on_recovery_fn: an optional function to run when a worker is recovered from preemption. worker_device_name: the device name of the worker instance that is passing through the failure. Yields: None. """ assert self._should_preemption_thread_run try: yield except (errors.OpError, ClosureInputError, ClosureAbortedError) as e: # The next state could reflect stale heartbeats, so wait for two rounds. # Example: # - Worker sends healthy heartbeat at T=0. # - Coordination service receives healthy heartbeat at T=0. # - Worker gets preempted at T=0.1. # - Coordinator catches error at T=0.2, and waits here for next states. # - Coordinator polls states at T=1.9. Heartbeat time has not elapsed yet, # so coordination service does not know it is down yet. # - Coordination service learns of worker unavailability at T=2, the next # heartbeat. # - Coordinator polls states at T=3.9 and learns of worker unavailability. 
with self._next_task_state_cond: # Give some buffer time to make sure task states are updated during the # wait interval self._next_task_state_cond.wait(_POLL_FREQ_IN_SEC * 1.25) with self._next_task_state_cond: self._next_task_state_cond.wait(_POLL_FREQ_IN_SEC * 1.25) # Check for coordination service failure if not self._task_states: self._log_ps_failure_and_raise(e, 0) worker_states = self._task_states[:self._num_workers] ps_states = self._task_states[self._num_workers:] # Check for PS failure if any(ps_states): failed_ps_index = [ ix for ix, ps_state in enumerate(ps_states) if ps_state ] self._log_ps_failure_and_raise(e, failed_ps_index[0]) # Check for preemption of this worker worker_ix = int(worker_device_name.split(":")[-1]) if worker_states[worker_ix]: # Raise error if all closures are being cancelled if self._cluster.closure_queue._cancellation_mgr.is_cancelled: # pylint: disable=protected-access if isinstance(e, errors.CancelledError): raise e # It's possible the caught error `e` here is due to worker preemption # and is thus not a `CancelledError`, because a different # unrecoverable error on another worker caused closure cancellation, # while this thread was waiting for task states. So raise a new # CancelledError. 
else: raise errors.CancelledError( None, None, "The corresponding function was cancelled while " "attempting to recover from worker failure.") # Else, preemption self._handle_failure_and_recovery(e, on_failure_fn, on_transient_failure_fn, on_recovery_fn, worker_device_name) return # else, if timeout: log if self._cluster._record_and_ignore_transient_timeouts(e): # pylint: disable=protected-access logging.error( "Remote function on worker %s failed with %r:%s\n" "This derived error is ignored and not reported to users.", worker_device_name, e, e) if on_transient_failure_fn: on_transient_failure_fn() return raise e def _handle_failure_and_recovery(self, e, on_failure_fn, on_transient_failure_fn, on_recovery_fn, worker_device_name): """Call failure fn, wait for cluster to recover, then call recovery fn. Args: e: the Exception thrown during closure execution. on_failure_fn: an optional function to run if preemption happens. on_transient_failure_fn: an optional function to run if transient failure happens. on_recovery_fn: an optional function to run when a worker is recovered from preemption. worker_device_name: the device name of the worker instance that is passing through the failure. """ if on_failure_fn: on_failure_fn(e) # update server def with self._cluster_update_lock: self._cluster_due_for_update_or_finish.set() self._worker_up_cond.wait(_WORKER_MAXIMUM_RECOVERY_SEC) if self._error_from_recovery: # TODO(yuefengz): there is only one worker that will get this error. # Ideally we should let all workers notified by `_worker_up_cond` get # this error. 
try: raise self._error_from_recovery finally: self._error_from_recovery = None logging.info("Worker %s has been recovered.", worker_device_name) if on_recovery_fn: logging.info("Worker %s calling on_recovery_fn", worker_device_name) with self.wait_on_failure( on_recovery_fn=on_recovery_fn, on_transient_failure_fn=on_transient_failure_fn, worker_device_name=worker_device_name): on_recovery_fn() def _log_ps_failure_and_raise(self, e, ps_index): logging.info("Parameter server failure detected at PS task %d", ps_index) self.stop() raise PSUnavailableError(e) def _get_task_states(self): """Get task states and reset to None if coordination service is down.""" try: self._task_states = context.context().get_task_states( [("worker", self._num_workers), ("ps", self._num_ps)] ) except (errors.UnavailableError, errors.InternalError) as e: if isinstance( e, errors.InternalError ) and "coordination service is not enabled" not in str(e).lower(): raise # Coordination service is down self._task_states = None with self._next_task_state_cond: self._next_task_state_cond.notify_all() def _preemption_handler(self): """A loop that handles preemption. This loop waits for signal of worker preemption and upon worker preemption, it waits until all workers are back and updates the cluster about the restarted workers. """ assert self._should_preemption_thread_run while True: self._cluster_due_for_update_or_finish.wait() if not self._should_preemption_thread_run: logging.info("Stopping the failure handing thread.") break with self._cluster_update_lock: try: # TODO(haoyuzhang): support partial cluster recovery logging.info("Cluster now being recovered.") context.context().update_server_def(self._server_def) # Cluster updated successfully, clear the update signal, and notify # all workers that they are recovered from failure. 
logging.info("Cluster successfully recovered.") self._notify_cluster_update() except Exception as e: # pylint: disable=broad-except logging.info("Error occurred while updating server def: %s", e) # Wait for the next set of states from the task state poller with self._next_task_state_cond: self._next_task_state_cond.wait(_POLL_FREQ_IN_SEC * 2) # If a PS is preempted, set the error if not self._task_states: self._error_from_recovery = e else: ps_states = self._task_states[self._num_workers:] # Check for PS failure if any(ps_states): self._error_from_recovery = e # Else, likely another worker failed. Just log and retry self._notify_cluster_update() # NOTE: Since the first RPC (GetStatus) of update_server_def is # currently blocking by default, error should only happen if: # (1) More workers failed while waiting for the previous workers to # come back; # (2) Worker failed when exchanging subsequent RPCs after the first # RPC returns. # Consider adding backoff retry logic if we see the error logged # too frequently. logging.error("Cluster update failed with error: %s. Retrying...", e) def _notify_cluster_update(self): self._worker_up_cond.notify_all() # The check for _should_preemption_thread_run is necessary since the # `stop` may have already set _cluster_due_for_update_or_finish. if self._should_preemption_thread_run: self._cluster_due_for_update_or_finish.clear()
CoordinationServicePreemptionHandler
python
great-expectations__great_expectations
great_expectations/expectations/metrics/column_map_metrics/column_values_not_in_set.py
{ "start": 424, "end": 1541 }
class ____(ColumnMapMetricProvider): condition_metric_name = "column_values.not_in_set" condition_value_keys = ("value_set",) @column_condition_partial(engine=PandasExecutionEngine) def _pandas( cls, column, value_set, **kwargs, ): if value_set is None: # Vacuously true return np.ones(len(column), dtype=np.bool_) if pd.api.types.is_datetime64_any_dtype(column): parsed_value_set = parse_value_set(value_set=value_set) else: parsed_value_set = value_set return ~column.isin(parsed_value_set) @column_condition_partial(engine=SqlAlchemyExecutionEngine) def _sqlalchemy( cls, column, value_set, **kwargs, ): if value_set is None or len(value_set) == 0: return True return column.notin_(tuple(value_set)) @column_condition_partial(engine=SparkDFExecutionEngine) def _spark( cls, column, value_set, **kwargs, ): return ~column.isin(value_set)
ColumnValuesNotInSet
python
dask__dask
dask/dataframe/dask_expr/_expr.py
{ "start": 90912, "end": 90990 }
class ____(BinOpSeries): operation = M.le _operator_repr = "<="
LESeries
python
spyder-ide__spyder
spyder/plugins/completion/api.py
{ "start": 21339, "end": 21607 }
class ____: ADDITION = 'addition' DELETION = 'deletion' # ---------------- OTHER GENERAL PURPOSE CONSTANTS ------------------ COMPLETION_ENTRYPOINT = 'spyder.completions' # -------------- SPYDER COMPLETION PROVIDER INTERFACE ---------------
WorkspaceUpdateKind
python
catalyst-team__catalyst
catalyst/contrib/losses/trevsky.py
{ "start": 2085, "end": 4612 }
class ____(nn.Module): """The focal trevsky loss. TrevskyIndex = TP / (TP + alpha * FN + betta * FP) FocalTrevskyLoss = (1 - TrevskyIndex)^gamma Node: focal will use per image, so loss will pay more attention on complicated images """ def __init__( self, alpha: float, beta: Optional[float] = None, gamma: float = 4 / 3, class_dim: int = 1, mode: str = "macro", weights: List[float] = None, eps: float = 1e-7, ): """ Args: alpha: false negative coefficient, bigger alpha bigger penalty for false negative. Must be in (0, 1) beta: false positive coefficient, bigger alpha bigger penalty for false positive. Must be in (0, 1), if None beta = (1 - alpha) gamma: focal coefficient. It determines how much the weight of simple examples is reduced. class_dim: indicates class dimention (K) for ``outputs`` and ``targets`` tensors (default = 1) mode: class summation strategy. Must be one of ['micro', 'macro', 'weighted']. If mode='micro', classes are ignored, and metric are calculated generally. If mode='macro', metric are calculated separately and than are averaged over all classes. If mode='weighted', metric are calculated separately and than summed over all classes with weights. weights: class weights(for mode="weighted") eps: epsilon to avoid zero division """ super().__init__() self.gamma = gamma self.trevsky_loss = TrevskyLoss( alpha=alpha, beta=beta, class_dim=class_dim, mode=mode, weights=weights, eps=eps, ) def forward(self, outputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor: """Calculates loss between ``logits`` and ``target`` tensors.""" loss = 0 batch_size = len(outputs) for output_sample, target_sample in zip(outputs, targets): output_sample = torch.unsqueeze(output_sample, dim=0) target_sample = torch.unsqueeze(target_sample, dim=0) sample_loss = self.trevsky_loss(output_sample, target_sample) loss += sample_loss ** self.gamma loss = loss / batch_size # mean over batch return loss __all__ = ["TrevskyLoss", "FocalTrevskyLoss"]
FocalTrevskyLoss
python
Pylons__pyramid
tests/test_session.py
{ "start": 21440, "end": 21845 }
class ____(dict): _dirty = False _cookie_name = 'session' _cookie_max_age = None _cookie_path = '/' _cookie_domain = None _cookie_secure = False _cookie_httponly = False _timeout = 1200 _reissue_time = 0 def __init__(self, request): self.request = request dict.__init__(self, {}) def changed(self): self._dirty = True
DummySessionFactory
python
keon__algorithms
tests/test_array.py
{ "start": 13495, "end": 17454 }
class ____(unittest.TestCase): def test_n_sum(self): self.assertEqual(n_sum(2, [-3, 5, 2, 3, 8, -9], 6), []) # noqa: E501 self.assertEqual( n_sum(3, [-5, -4, -3, -2, -1, 0, 1, 2, 3], 0), sorted( [ [-5, 2, 3], [-2, 0, 2], [-4, 1, 3], [-3, 1, 2], [-1, 0, 1], [-2, -1, 3], [-3, 0, 3], ] ), ) # noqa: E501 self.assertEqual( n_sum(3, [-1, 0, 1, 2, -1, -4], 0), sorted([[-1, -1, 2], [-1, 0, 1]]) ) # noqa: E501 self.assertEqual( n_sum(4, [1, 0, -1, 0, -2, 2], 0), sorted([[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]]), ) # noqa: E501 self.assertEqual( n_sum( 4, [7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 6, 4, -3, -2], 10 ), sorted( [ [-6, 2, 7, 7], [-6, 3, 6, 7], [-6, 4, 5, 7], [-6, 4, 6, 6], [-5, 1, 7, 7], [-5, 2, 6, 7], [-5, 3, 5, 7], [-5, 3, 6, 6], [-5, 4, 4, 7], [-5, 4, 5, 6], [-4, 0, 7, 7], [-4, 1, 6, 7], [-4, 2, 5, 7], [-4, 2, 6, 6], [-4, 3, 4, 7], [-4, 3, 5, 6], [-4, 4, 4, 6], [-3, -1, 7, 7], [-3, 0, 6, 7], [-3, 1, 5, 7], [-3, 1, 6, 6], [-3, 2, 4, 7], [-3, 2, 5, 6], [-3, 3, 4, 6], [-3, 4, 4, 5], [-2, -2, 7, 7], [-2, -1, 6, 7], [-2, 0, 5, 7], [-2, 0, 6, 6], [-2, 1, 4, 7], [-2, 1, 5, 6], [-2, 2, 3, 7], [-2, 2, 4, 6], [-2, 3, 4, 5], [-1, 0, 4, 7], [-1, 0, 5, 6], [-1, 1, 3, 7], [-1, 1, 4, 6], [-1, 2, 3, 6], [-1, 2, 4, 5], [-1, 3, 4, 4], [0, 1, 2, 7], [0, 1, 3, 6], [0, 1, 4, 5], [0, 2, 3, 5], [0, 2, 4, 4], [1, 2, 3, 4], ] ), ) # noqa: E501 self.assertEqual( n_sum( 2, [[-3, 0], [-2, 1], [2, 2], [3, 3], [8, 4], [-9, 5]], 0, # noqa: E501 sum_closure=lambda a, b: a[0] + b[0], ), # noqa: E501 [[[-3, 0], [3, 3]], [[-2, 1], [2, 2]]], ) # noqa: E501 self.assertEqual( n_sum( 2, [[-3, 0], [-2, 1], [2, 2], [3, 3], [8, 4], [-9, 5]], [0, 3], # noqa: E501 sum_closure=lambda a, b: [a[0] + b[0], a[1] + b[1]], # noqa: E501 same_closure=lambda a, b: a[0] == b[0] and a[1] == b[1], ), # noqa: E501 [[[-3, 0], [3, 3]], [[-2, 1], [2, 2]]], ) # noqa: E501 self.assertEqual( n_sum( 2, [[-3, 0], [-2, 1], [2, 2], [3, 3], [8, 4], [-9, 5]], -5, # noqa: E501 sum_closure=lambda a, b: [a[0] + 
b[1], a[1] + b[0]], # noqa: E501 compare_closure=lambda a, b: -1 if a[0] < b else 1 if a[0] > b else 0, ), # noqa: E501 [[[-9, 5], [8, 4]]], ) # noqa: E501 if __name__ == "__main__": unittest.main()
TestNSum
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/constructors.py
{ "start": 3875, "end": 4190 }
class ____(ConstructorObscure): def __init__(self, a, b): super().__init__(a, b) def test_parent_with_obscure_constructor(): _test_sink(ParentWithObscureConstructor(_test_source(), "")) # Issue. _test_sink(ParentWithObscureConstructor("", _test_source())) # Issue.
ParentWithObscureConstructor
python
pyinstaller__pyinstaller
PyInstaller/exceptions.py
{ "start": 652, "end": 1140 }
class ____(HookError): def __str__(self): return ( "ERROR: Failed to import module {0} required by hook for module {1}. Please check whether module {0} " "actually exists and whether the hook is compatible with your version of {1}: You might want to read more " "about hooks in the manual and provide a pull-request to improve PyInstaller.".format( self.args[0], self.args[1] ) )
ImportErrorWhenRunningHook
python
pytorch__pytorch
torch/utils/data/datapipes/datapipe.py
{ "start": 17117, "end": 17268 }
class ____(_DataPipeSerializationWrapper, MapDataPipe): def __getitem__(self, idx): return self._datapipe[idx]
_MapDataPipeSerializationWrapper
python
django__django
tests/utils_tests/test_timesince.py
{ "start": 12431, "end": 12599 }
class ____(TimesinceTests): def setUp(self): super().setUp() self.t = timezone.make_aware(self.t, timezone.get_default_timezone())
TZAwareTimesinceTests
python
ray-project__ray
python/ray/serve/batching.py
{ "start": 20862, "end": 28662 }
class ____(Protocol): """Descibes behaviour of decorator produced by calling `batch` with arguments""" @overload # Sync function def __call__(self, _sync_func: Callable[[List[T]], List[R]], /) -> Callable[[T], R]: ... @overload # Async function def __call__( self, _async_func: Callable[[List[T]], Coroutine[Any, Any, List[R]]], / ) -> Callable[[T], Coroutine[Any, Any, R]]: ... @overload # Sync method def __call__( self, _sync_meth: _SyncBatchingMethod[SelfType, T, R], / ) -> Callable[[SelfType, T], R]: ... @overload # Async method def __call__( self, _async_meth: _AsyncBatchingMethod[SelfType, T, R], / ) -> Callable[[SelfType, T], Coroutine[Any, Any, R]]: ... @PublicAPI(stability="stable") def batch( _func: Optional[Callable] = None, /, max_batch_size: int = 10, batch_wait_timeout_s: float = 0.01, max_concurrent_batches: int = 1, ) -> Callable: """Converts a function to asynchronously handle batches. The function can be a standalone function or a class method. In both cases, the function must be `async def` and take a list of objects as its sole argument and return a list of the same length as a result. When invoked, the caller passes a single object. These will be batched and executed asynchronously once there is a batch of `max_batch_size` or `batch_wait_timeout_s` has elapsed, whichever occurs first. `max_batch_size` and `batch_wait_timeout_s` can be updated using setter methods from the batch_handler (`set_max_batch_size` and `set_batch_wait_timeout_s`). Example: .. 
code-block:: python from ray import serve from starlette.requests import Request @serve.deployment class BatchedDeployment: @serve.batch(max_batch_size=10, batch_wait_timeout_s=0.1) async def batch_handler(self, requests: List[Request]) -> List[str]: response_batch = [] for r in requests: name = (await requests.json())["name"] response_batch.append(f"Hello {name}!") return response_batch def update_batch_params(self, max_batch_size, batch_wait_timeout_s): self.batch_handler.set_max_batch_size(max_batch_size) self.batch_handler.set_batch_wait_timeout_s(batch_wait_timeout_s) async def __call__(self, request: Request): return await self.batch_handler(request) app = BatchedDeployment.bind() Arguments: max_batch_size: the maximum batch size that will be executed in one call to the underlying function. batch_wait_timeout_s: the maximum duration to wait for `max_batch_size` elements before running the current batch. max_concurrent_batches: the maximum number of batches that can be executed concurrently. If the number of concurrent batches exceeds this limit, the batch handler will wait for a batch to complete before sending the next batch to the underlying function. """ # `_func` will be None in the case when the decorator is parametrized. # See the comment at the end of this function for a detailed explanation. if _func is not None: if not callable(_func): raise TypeError( "@serve.batch can only be used to decorate functions or methods." 
) if not iscoroutinefunction(_func): raise TypeError("Functions decorated with @serve.batch must be 'async def'") _validate_max_batch_size(max_batch_size) _validate_batch_wait_timeout_s(batch_wait_timeout_s) _validate_max_concurrent_batches(max_concurrent_batches) def _batch_decorator(_func): lazy_batch_queue_wrapper = _LazyBatchQueueWrapper( max_batch_size, batch_wait_timeout_s, max_concurrent_batches, _func, ) async def batch_handler_generator( first_future: asyncio.Future, ) -> AsyncGenerator: """Generator that handles generator batch functions.""" future = first_future while True: try: async_response: _GeneratorResult = await future future = async_response.next_future yield async_response.result except StopAsyncIteration: break def enqueue_request(args, kwargs) -> asyncio.Future: flattened_args: List = flatten_args(extract_signature(_func), args, kwargs) # If the function is a method, remove self as an argument. self = extract_self_if_method_call(args, _func) if self is not None: flattened_args = flattened_args[2:] batch_queue = lazy_batch_queue_wrapper.queue future = get_or_create_event_loop().create_future() request_context = serve.context._get_serve_request_context() batch_queue.put( _SingleRequest(self, flattened_args, future, request_context) ) return future @wraps(_func) def generator_batch_wrapper(*args, **kwargs): first_future = enqueue_request(args, kwargs) return batch_handler_generator(first_future) @wraps(_func) async def batch_wrapper(*args, **kwargs): # This will raise if the underlying call raised an exception. return await enqueue_request(args, kwargs) if isasyncgenfunction(_func): wrapper = generator_batch_wrapper else: wrapper = batch_wrapper # We store the lazy_batch_queue_wrapper's getters and setters as # batch_wrapper attributes, so they can be accessed in user code. 
wrapper._get_max_batch_size = lazy_batch_queue_wrapper.get_max_batch_size wrapper._get_batch_wait_timeout_s = ( lazy_batch_queue_wrapper.get_batch_wait_timeout_s ) wrapper.set_max_batch_size = lazy_batch_queue_wrapper.set_max_batch_size wrapper.set_batch_wait_timeout_s = ( lazy_batch_queue_wrapper.set_batch_wait_timeout_s ) # Store debugging methods in the lazy_batch_queue wrapper wrapper._get_curr_iteration_start_times = ( lazy_batch_queue_wrapper._get_curr_iteration_start_times ) wrapper._is_batching_task_alive = ( lazy_batch_queue_wrapper._is_batching_task_alive ) wrapper._get_handling_task_stack = ( lazy_batch_queue_wrapper._get_handling_task_stack ) return wrapper # Unfortunately, this is required to handle both non-parametrized # (@serve.batch) and parametrized (@serve.batch(**kwargs)) usage. # In the former case, `serve.batch` will be called with the underlying # function as the sole argument. In the latter case, it will first be # called with **kwargs, then the result of that call will be called # with the underlying function as the sole argument (i.e., it must be a # "decorator factory."). return _batch_decorator(_func) if callable(_func) else _batch_decorator def _set_result_if_not_done(future: asyncio.Future, result: Any): """Sets the future's result if the future is not done.""" if not future.done(): future.set_result(result) def _set_exception_if_not_done(future: asyncio.Future, exception: Any): """Sets the future's exception if the future is not done.""" if not future.done(): future.set_exception(exception)
_BatchDecorator
python
Textualize__textual
tests/tree/test_tree_clearing.py
{ "start": 325, "end": 370 }
class ____(Tree[VerseBody]): pass
VerseTree
python
getsentry__sentry
src/sentry/api/endpoints/project_symbol_sources.py
{ "start": 2083, "end": 4071 }
class ____(serializers.Serializer): """ Filter settings for the source. This is optional for all sources. **`filetypes`** ***(list)*** - A list of file types that can be found on this source. If this is left empty, all file types will be enabled. The options are: - `pe` - Windows executable files - `pdb` - Windows debug files - `portablepdb` - .NET portable debug files - `mach_code` - MacOS executable files - `mach_debug` - MacOS debug files - `elf_code` - ELF executable files - `elf_debug` - ELF debug files - `wasm_code` - WASM executable files - `wasm_debug` - WASM debug files - `breakpad` - Breakpad symbol files - `sourcebundle` - Source code bundles - `uuidmap` - Apple UUID mapping files - `bcsymbolmap` - Apple bitcode symbol maps - `il2cpp` - Unity IL2CPP mapping files - `proguard` - ProGuard mapping files **`path_patterns`** ***(list)*** - A list of glob patterns to check against the debug and code file paths of debug files. Only files that match one of these patterns will be requested from the source. If this is left empty, no path-based filtering takes place. **`requires_checksum`** ***(boolean)*** - Whether this source requires a debug checksum to be sent with each request. Defaults to `false`. ```json { "filters": { "filetypes": ["pe", "pdb", "portablepdb"], "path_patterns": ["*ffmpeg*"] } } ``` """ filetypes = serializers.MultipleChoiceField( choices=VALID_FILE_TYPES, required=False, help_text="The file types enabled for the source.", ) path_patterns = serializers.ListField( child=serializers.CharField(), required=False, help_text="The debug and code file paths enabled for the source.", ) requires_checksum = serializers.BooleanField( required=False, help_text="Whether the source requires debug checksums." )
FiltersSerializer
python
spyder-ide__spyder
external-deps/spyder-kernels/spyder_kernels/comms/commbase.py
{ "start": 2921, "end": 5555 }
class ____(): def __init__(self, call_name, call_id): self.call_name = call_name self.call_id = call_id self.etype, self.error, tb = sys.exc_info() self.tb = traceback.extract_tb(tb) def to_json(self): """Create JSON representation.""" return { "call_name": self.call_name, "call_id": self.call_id, "etype": self.etype.__name__, "args": self.error.args, "error_name": getattr(self.error, "name", None), "tb": stacksummary_to_json(self.tb) } @classmethod def from_json(cls, json_data): """Get a CommsErrorWrapper from a JSON representation.""" instance = cls.__new__(cls) instance.call_name = json_data["call_name"] instance.call_id = json_data["call_id"] etype = json_data["etype"] instance.etype = getattr( builtins, etype, type(etype, (Exception,), {}) ) instance.error = instance.etype(*json_data["args"]) if json_data["error_name"]: instance.error.name = json_data["error_name"] instance.tb = stacksummary_from_json(json_data["tb"]) return instance def raise_error(self): """ Raise the error while adding informations on the callback. """ # Add the traceback in the error, so it can be handled upstream raise self.etype(self) def format_error(self): """ Format the error received from the other side and returns a list of strings. """ lines = (['Exception in comms call {}:\n'.format(self.call_name)] + traceback.format_list(self.tb) + traceback.format_exception_only(self.etype, self.error)) return lines def print_error(self, file=None): """ Print the error to file or to sys.stderr if file is None. 
""" if file is None: file = sys.stderr for line in self.format_error(): print(line, file=file) def __str__(self): """Get string representation.""" return str(self.error) def __repr__(self): """Get repr.""" return repr(self.error) # Replace sys.excepthook to handle CommsErrorWrapper sys_excepthook = sys.excepthook def comm_excepthook(type, value, tb): if len(value.args) == 1 and isinstance(value.args[0], CommsErrorWrapper): traceback.print_tb(tb) value.args[0].print_error() return sys_excepthook(type, value, tb) sys.excepthook = comm_excepthook
CommsErrorWrapper
python
kennethreitz__tablib
src/tablib/formats/__init__.py
{ "start": 1189, "end": 1633 }
class ____: def __init__(self, key, format_or_path): self.key = key self._format_path = None if isinstance(format_or_path, str): self._format = None self._format_path = format_or_path else: self._format = format_or_path def ensure_format_loaded(self): if self._format is None: self._format = load_format_class(self._format_path)
FormatDescriptorBase
python
pytorch__pytorch
test/distributions/test_distributions.py
{ "start": 197192, "end": 227564 }
class ____(DistributionsTestCase): def setUp(self): super().setUp() self.scalar_sample = 1 self.tensor_sample_1 = torch.ones(3, 2) self.tensor_sample_2 = torch.ones(3, 2, 3) def test_entropy_shape(self): for Dist, params in _get_examples(): for i, param in enumerate(params): dist = Dist(validate_args=False, **param) try: actual_shape = dist.entropy().size() expected_shape = ( dist.batch_shape if dist.batch_shape else torch.Size() ) message = f"{Dist.__name__} example {i + 1}/{len(params)}, shape mismatch. expected {expected_shape}, actual {actual_shape}" # noqa: B950 self.assertEqual(actual_shape, expected_shape, msg=message) except NotImplementedError: continue def test_bernoulli_shape_scalar_params(self): bernoulli = Bernoulli(0.3) self.assertEqual(bernoulli._batch_shape, torch.Size()) self.assertEqual(bernoulli._event_shape, torch.Size()) self.assertEqual(bernoulli.sample().size(), torch.Size()) self.assertEqual(bernoulli.sample((3, 2)).size(), torch.Size((3, 2))) self.assertRaises(ValueError, bernoulli.log_prob, self.scalar_sample) self.assertEqual( bernoulli.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertEqual( bernoulli.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) def test_bernoulli_shape_tensor_params(self): bernoulli = Bernoulli(torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]])) self.assertEqual(bernoulli._batch_shape, torch.Size((3, 2))) self.assertEqual(bernoulli._event_shape, torch.Size(())) self.assertEqual(bernoulli.sample().size(), torch.Size((3, 2))) self.assertEqual(bernoulli.sample((3, 2)).size(), torch.Size((3, 2, 3, 2))) self.assertEqual( bernoulli.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertRaises(ValueError, bernoulli.log_prob, self.tensor_sample_2) self.assertEqual( bernoulli.log_prob(torch.ones(3, 1, 1)).size(), torch.Size((3, 3, 2)) ) def test_geometric_shape_scalar_params(self): geometric = Geometric(0.3) self.assertEqual(geometric._batch_shape, torch.Size()) 
self.assertEqual(geometric._event_shape, torch.Size()) self.assertEqual(geometric.sample().size(), torch.Size()) self.assertEqual(geometric.sample((3, 2)).size(), torch.Size((3, 2))) self.assertRaises(ValueError, geometric.log_prob, self.scalar_sample) self.assertEqual( geometric.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertEqual( geometric.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) def test_geometric_shape_tensor_params(self): geometric = Geometric(torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]])) self.assertEqual(geometric._batch_shape, torch.Size((3, 2))) self.assertEqual(geometric._event_shape, torch.Size(())) self.assertEqual(geometric.sample().size(), torch.Size((3, 2))) self.assertEqual(geometric.sample((3, 2)).size(), torch.Size((3, 2, 3, 2))) self.assertEqual( geometric.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertRaises(ValueError, geometric.log_prob, self.tensor_sample_2) self.assertEqual( geometric.log_prob(torch.ones(3, 1, 1)).size(), torch.Size((3, 3, 2)) ) def test_beta_shape_scalar_params(self): dist = Beta(0.1, 0.1) self.assertEqual(dist._batch_shape, torch.Size()) self.assertEqual(dist._event_shape, torch.Size()) self.assertEqual(dist.sample().size(), torch.Size()) self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2))) self.assertRaises(ValueError, dist.log_prob, self.scalar_sample) self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2))) self.assertEqual( dist.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) def test_beta_shape_tensor_params(self): dist = Beta( torch.tensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]), torch.tensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]), ) self.assertEqual(dist._batch_shape, torch.Size((3, 2))) self.assertEqual(dist._event_shape, torch.Size(())) self.assertEqual(dist.sample().size(), torch.Size((3, 2))) self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 3, 2))) 
self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2))) self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_2) self.assertEqual( dist.log_prob(torch.ones(3, 1, 1)).size(), torch.Size((3, 3, 2)) ) def test_binomial_shape(self): dist = Binomial(10, torch.tensor([0.6, 0.3])) self.assertEqual(dist._batch_shape, torch.Size((2,))) self.assertEqual(dist._event_shape, torch.Size(())) self.assertEqual(dist.sample().size(), torch.Size((2,))) self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 2))) self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2))) self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_2) def test_binomial_shape_vectorized_n(self): dist = Binomial( torch.tensor([[10, 3, 1], [4, 8, 4]]), torch.tensor([0.6, 0.3, 0.1]) ) self.assertEqual(dist._batch_shape, torch.Size((2, 3))) self.assertEqual(dist._event_shape, torch.Size(())) self.assertEqual(dist.sample().size(), torch.Size((2, 3))) self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 2, 3))) self.assertEqual( dist.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_1) def test_multinomial_shape(self): dist = Multinomial(10, torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]])) self.assertEqual(dist._batch_shape, torch.Size((3,))) self.assertEqual(dist._event_shape, torch.Size((2,))) self.assertEqual(dist.sample().size(), torch.Size((3, 2))) self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 3, 2))) self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3,))) self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_2) self.assertEqual(dist.log_prob(torch.ones(3, 1, 2)).size(), torch.Size((3, 3))) def test_categorical_shape(self): # unbatched dist = Categorical(torch.tensor([0.6, 0.3, 0.1])) self.assertEqual(dist._batch_shape, torch.Size(())) self.assertEqual(dist._event_shape, torch.Size(())) 
self.assertEqual(dist.sample().size(), torch.Size()) self.assertEqual( dist.sample((3, 2)).size(), torch.Size( ( 3, 2, ) ), ) self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2))) self.assertEqual( dist.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) self.assertEqual(dist.log_prob(torch.ones(3, 1)).size(), torch.Size((3, 1))) # batched dist = Categorical(torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]])) self.assertEqual(dist._batch_shape, torch.Size((3,))) self.assertEqual(dist._event_shape, torch.Size(())) self.assertEqual(dist.sample().size(), torch.Size((3,))) self.assertEqual( dist.sample((3, 2)).size(), torch.Size( ( 3, 2, 3, ) ), ) self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_1) self.assertEqual( dist.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) self.assertEqual(dist.log_prob(torch.ones(3, 1)).size(), torch.Size((3, 3))) def test_one_hot_categorical_shape(self): # unbatched dist = OneHotCategorical(torch.tensor([0.6, 0.3, 0.1])) self.assertEqual(dist._batch_shape, torch.Size(())) self.assertEqual(dist._event_shape, torch.Size((3,))) self.assertEqual(dist.sample().size(), torch.Size((3,))) self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 3))) self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_1) sample = torch.tensor([0.0, 1.0, 0.0]).expand(3, 2, 3) self.assertEqual( dist.log_prob(sample).size(), torch.Size( ( 3, 2, ) ), ) self.assertEqual( dist.log_prob(dist.enumerate_support()).size(), torch.Size((3,)) ) sample = torch.eye(3) self.assertEqual(dist.log_prob(sample).size(), torch.Size((3,))) # batched dist = OneHotCategorical(torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]])) self.assertEqual(dist._batch_shape, torch.Size((3,))) self.assertEqual(dist._event_shape, torch.Size((2,))) self.assertEqual(dist.sample().size(), torch.Size((3, 2))) self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 3, 2))) sample = torch.tensor([0.0, 1.0]) 
self.assertEqual(dist.log_prob(sample).size(), torch.Size((3,))) self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_2) self.assertEqual( dist.log_prob(dist.enumerate_support()).size(), torch.Size((2, 3)) ) sample = torch.tensor([0.0, 1.0]).expand(3, 1, 2) self.assertEqual(dist.log_prob(sample).size(), torch.Size((3, 3))) def test_cauchy_shape_scalar_params(self): cauchy = Cauchy(0, 1) self.assertEqual(cauchy._batch_shape, torch.Size()) self.assertEqual(cauchy._event_shape, torch.Size()) self.assertEqual(cauchy.sample().size(), torch.Size()) self.assertEqual(cauchy.sample(torch.Size((3, 2))).size(), torch.Size((3, 2))) self.assertRaises(ValueError, cauchy.log_prob, self.scalar_sample) self.assertEqual( cauchy.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertEqual( cauchy.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) def test_cauchy_shape_tensor_params(self): cauchy = Cauchy(torch.tensor([0.0, 0.0]), torch.tensor([1.0, 1.0])) self.assertEqual(cauchy._batch_shape, torch.Size((2,))) self.assertEqual(cauchy._event_shape, torch.Size(())) self.assertEqual(cauchy.sample().size(), torch.Size((2,))) self.assertEqual( cauchy.sample(torch.Size((3, 2))).size(), torch.Size((3, 2, 2)) ) self.assertEqual( cauchy.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertRaises(ValueError, cauchy.log_prob, self.tensor_sample_2) self.assertEqual(cauchy.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2))) def test_halfcauchy_shape_scalar_params(self): halfcauchy = HalfCauchy(1) self.assertEqual(halfcauchy._batch_shape, torch.Size()) self.assertEqual(halfcauchy._event_shape, torch.Size()) self.assertEqual(halfcauchy.sample().size(), torch.Size()) self.assertEqual( halfcauchy.sample(torch.Size((3, 2))).size(), torch.Size((3, 2)) ) self.assertRaises(ValueError, halfcauchy.log_prob, self.scalar_sample) self.assertEqual( halfcauchy.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertEqual( 
halfcauchy.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) def test_halfcauchy_shape_tensor_params(self): halfcauchy = HalfCauchy(torch.tensor([1.0, 1.0])) self.assertEqual(halfcauchy._batch_shape, torch.Size((2,))) self.assertEqual(halfcauchy._event_shape, torch.Size(())) self.assertEqual(halfcauchy.sample().size(), torch.Size((2,))) self.assertEqual( halfcauchy.sample(torch.Size((3, 2))).size(), torch.Size((3, 2, 2)) ) self.assertEqual( halfcauchy.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertRaises(ValueError, halfcauchy.log_prob, self.tensor_sample_2) self.assertEqual( halfcauchy.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)) ) def test_dirichlet_shape(self): dist = Dirichlet(torch.tensor([[0.6, 0.3], [1.6, 1.3], [2.6, 2.3]])) self.assertEqual(dist._batch_shape, torch.Size((3,))) self.assertEqual(dist._event_shape, torch.Size((2,))) self.assertEqual(dist.sample().size(), torch.Size((3, 2))) self.assertEqual(dist.sample((5, 4)).size(), torch.Size((5, 4, 3, 2))) simplex_sample = self.tensor_sample_1 / self.tensor_sample_1.sum( -1, keepdim=True ) self.assertEqual(dist.log_prob(simplex_sample).size(), torch.Size((3,))) self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_2) simplex_sample = torch.ones(3, 1, 2) simplex_sample = simplex_sample / simplex_sample.sum(-1).unsqueeze(-1) self.assertEqual(dist.log_prob(simplex_sample).size(), torch.Size((3, 3))) def test_mixture_same_family_shape(self): dist = MixtureSameFamily( Categorical(torch.rand(5)), Normal(torch.randn(5), torch.rand(5)) ) self.assertEqual(dist._batch_shape, torch.Size()) self.assertEqual(dist._event_shape, torch.Size()) self.assertEqual(dist.sample().size(), torch.Size()) self.assertEqual(dist.sample((5, 4)).size(), torch.Size((5, 4))) self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2))) self.assertEqual( dist.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) def test_gamma_shape_scalar_params(self): 
gamma = Gamma(1, 1) self.assertEqual(gamma._batch_shape, torch.Size()) self.assertEqual(gamma._event_shape, torch.Size()) self.assertEqual(gamma.sample().size(), torch.Size()) self.assertEqual(gamma.sample((3, 2)).size(), torch.Size((3, 2))) self.assertEqual(gamma.log_prob(self.scalar_sample).size(), torch.Size()) self.assertEqual( gamma.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertEqual( gamma.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) def test_gamma_shape_tensor_params(self): gamma = Gamma(torch.tensor([1.0, 1.0]), torch.tensor([1.0, 1.0])) self.assertEqual(gamma._batch_shape, torch.Size((2,))) self.assertEqual(gamma._event_shape, torch.Size(())) self.assertEqual(gamma.sample().size(), torch.Size((2,))) self.assertEqual(gamma.sample((3, 2)).size(), torch.Size((3, 2, 2))) self.assertEqual( gamma.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertRaises(ValueError, gamma.log_prob, self.tensor_sample_2) self.assertEqual(gamma.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2))) def test_chi2_shape_scalar_params(self): chi2 = Chi2(1) self.assertEqual(chi2._batch_shape, torch.Size()) self.assertEqual(chi2._event_shape, torch.Size()) self.assertEqual(chi2.sample().size(), torch.Size()) self.assertEqual(chi2.sample((3, 2)).size(), torch.Size((3, 2))) self.assertEqual(chi2.log_prob(self.scalar_sample).size(), torch.Size()) self.assertEqual(chi2.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2))) self.assertEqual( chi2.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) def test_chi2_shape_tensor_params(self): chi2 = Chi2(torch.tensor([1.0, 1.0])) self.assertEqual(chi2._batch_shape, torch.Size((2,))) self.assertEqual(chi2._event_shape, torch.Size(())) self.assertEqual(chi2.sample().size(), torch.Size((2,))) self.assertEqual(chi2.sample((3, 2)).size(), torch.Size((3, 2, 2))) self.assertEqual(chi2.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2))) self.assertRaises(ValueError, 
chi2.log_prob, self.tensor_sample_2) self.assertEqual(chi2.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2))) def test_studentT_shape_scalar_params(self): st = StudentT(1) self.assertEqual(st._batch_shape, torch.Size()) self.assertEqual(st._event_shape, torch.Size()) self.assertEqual(st.sample().size(), torch.Size()) self.assertEqual(st.sample((3, 2)).size(), torch.Size((3, 2))) self.assertRaises(ValueError, st.log_prob, self.scalar_sample) self.assertEqual(st.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2))) self.assertEqual( st.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) def test_studentT_shape_tensor_params(self): st = StudentT(torch.tensor([1.0, 1.0])) self.assertEqual(st._batch_shape, torch.Size((2,))) self.assertEqual(st._event_shape, torch.Size(())) self.assertEqual(st.sample().size(), torch.Size((2,))) self.assertEqual(st.sample((3, 2)).size(), torch.Size((3, 2, 2))) self.assertEqual(st.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2))) self.assertRaises(ValueError, st.log_prob, self.tensor_sample_2) self.assertEqual(st.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2))) def test_pareto_shape_scalar_params(self): pareto = Pareto(1, 1) self.assertEqual(pareto._batch_shape, torch.Size()) self.assertEqual(pareto._event_shape, torch.Size()) self.assertEqual(pareto.sample().size(), torch.Size()) self.assertEqual(pareto.sample((3, 2)).size(), torch.Size((3, 2))) self.assertEqual( pareto.log_prob(self.tensor_sample_1 + 1).size(), torch.Size((3, 2)) ) self.assertEqual( pareto.log_prob(self.tensor_sample_2 + 1).size(), torch.Size((3, 2, 3)) ) def test_gumbel_shape_scalar_params(self): gumbel = Gumbel(1, 1) self.assertEqual(gumbel._batch_shape, torch.Size()) self.assertEqual(gumbel._event_shape, torch.Size()) self.assertEqual(gumbel.sample().size(), torch.Size()) self.assertEqual(gumbel.sample((3, 2)).size(), torch.Size((3, 2))) self.assertEqual( gumbel.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) 
self.assertEqual( gumbel.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) def test_kumaraswamy_shape_scalar_params(self): kumaraswamy = Kumaraswamy(1, 1) self.assertEqual(kumaraswamy._batch_shape, torch.Size()) self.assertEqual(kumaraswamy._event_shape, torch.Size()) self.assertEqual(kumaraswamy.sample().size(), torch.Size()) self.assertEqual(kumaraswamy.sample((3, 2)).size(), torch.Size((3, 2))) self.assertEqual( kumaraswamy.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertEqual( kumaraswamy.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) def test_vonmises_shape_tensor_params(self): von_mises = VonMises(torch.tensor([0.0, 0.0]), torch.tensor([1.0, 1.0])) self.assertEqual(von_mises._batch_shape, torch.Size((2,))) self.assertEqual(von_mises._event_shape, torch.Size(())) self.assertEqual(von_mises.sample().size(), torch.Size((2,))) self.assertEqual( von_mises.sample(torch.Size((3, 2))).size(), torch.Size((3, 2, 2)) ) self.assertEqual( von_mises.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertEqual( von_mises.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)) ) def test_vonmises_shape_scalar_params(self): von_mises = VonMises(0.0, 1.0) self.assertEqual(von_mises._batch_shape, torch.Size()) self.assertEqual(von_mises._event_shape, torch.Size()) self.assertEqual(von_mises.sample().size(), torch.Size()) self.assertEqual( von_mises.sample(torch.Size((3, 2))).size(), torch.Size((3, 2)) ) self.assertEqual( von_mises.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertEqual( von_mises.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) def test_weibull_scale_scalar_params(self): weibull = Weibull(1, 1) self.assertEqual(weibull._batch_shape, torch.Size()) self.assertEqual(weibull._event_shape, torch.Size()) self.assertEqual(weibull.sample().size(), torch.Size()) self.assertEqual(weibull.sample((3, 2)).size(), torch.Size((3, 2))) self.assertEqual( 
weibull.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertEqual( weibull.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) def test_wishart_shape_scalar_params(self): wishart = Wishart(torch.tensor(1), torch.tensor([[1.0]])) self.assertEqual(wishart._batch_shape, torch.Size()) self.assertEqual(wishart._event_shape, torch.Size((1, 1))) self.assertEqual(wishart.sample().size(), torch.Size((1, 1))) self.assertEqual(wishart.sample((3, 2)).size(), torch.Size((3, 2, 1, 1))) self.assertRaises(ValueError, wishart.log_prob, self.scalar_sample) def test_wishart_shape_tensor_params(self): wishart = Wishart(torch.tensor([1.0, 1.0]), torch.tensor([[[1.0]], [[1.0]]])) self.assertEqual(wishart._batch_shape, torch.Size((2,))) self.assertEqual(wishart._event_shape, torch.Size((1, 1))) self.assertEqual(wishart.sample().size(), torch.Size((2, 1, 1))) self.assertEqual(wishart.sample((3, 2)).size(), torch.Size((3, 2, 2, 1, 1))) self.assertRaises(ValueError, wishart.log_prob, self.tensor_sample_2) self.assertEqual(wishart.log_prob(torch.ones(2, 1, 1)).size(), torch.Size((2,))) def test_normal_shape_scalar_params(self): normal = Normal(0, 1) self.assertEqual(normal._batch_shape, torch.Size()) self.assertEqual(normal._event_shape, torch.Size()) self.assertEqual(normal.sample().size(), torch.Size()) self.assertEqual(normal.sample((3, 2)).size(), torch.Size((3, 2))) self.assertRaises(ValueError, normal.log_prob, self.scalar_sample) self.assertEqual( normal.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertEqual( normal.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) def test_normal_shape_tensor_params(self): normal = Normal(torch.tensor([0.0, 0.0]), torch.tensor([1.0, 1.0])) self.assertEqual(normal._batch_shape, torch.Size((2,))) self.assertEqual(normal._event_shape, torch.Size(())) self.assertEqual(normal.sample().size(), torch.Size((2,))) self.assertEqual(normal.sample((3, 2)).size(), torch.Size((3, 2, 2))) 
self.assertEqual( normal.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertRaises(ValueError, normal.log_prob, self.tensor_sample_2) self.assertEqual(normal.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2))) def test_uniform_shape_scalar_params(self): uniform = Uniform(0, 1) self.assertEqual(uniform._batch_shape, torch.Size()) self.assertEqual(uniform._event_shape, torch.Size()) self.assertEqual(uniform.sample().size(), torch.Size()) self.assertEqual(uniform.sample(torch.Size((3, 2))).size(), torch.Size((3, 2))) self.assertRaises(ValueError, uniform.log_prob, self.scalar_sample) self.assertEqual( uniform.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertEqual( uniform.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) def test_uniform_shape_tensor_params(self): uniform = Uniform(torch.tensor([0.0, 0.0]), torch.tensor([1.0, 1.0])) self.assertEqual(uniform._batch_shape, torch.Size((2,))) self.assertEqual(uniform._event_shape, torch.Size(())) self.assertEqual(uniform.sample().size(), torch.Size((2,))) self.assertEqual( uniform.sample(torch.Size((3, 2))).size(), torch.Size((3, 2, 2)) ) self.assertEqual( uniform.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertRaises(ValueError, uniform.log_prob, self.tensor_sample_2) self.assertEqual(uniform.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2))) def test_exponential_shape_scalar_param(self): expon = Exponential(1.0) self.assertEqual(expon._batch_shape, torch.Size()) self.assertEqual(expon._event_shape, torch.Size()) self.assertEqual(expon.sample().size(), torch.Size()) self.assertEqual(expon.sample((3, 2)).size(), torch.Size((3, 2))) self.assertRaises(ValueError, expon.log_prob, self.scalar_sample) self.assertEqual( expon.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertEqual( expon.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) def test_exponential_shape_tensor_param(self): expon = 
Exponential(torch.tensor([1.0, 1.0])) self.assertEqual(expon._batch_shape, torch.Size((2,))) self.assertEqual(expon._event_shape, torch.Size(())) self.assertEqual(expon.sample().size(), torch.Size((2,))) self.assertEqual(expon.sample((3, 2)).size(), torch.Size((3, 2, 2))) self.assertEqual( expon.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertRaises(ValueError, expon.log_prob, self.tensor_sample_2) self.assertEqual(expon.log_prob(torch.ones(2, 2)).size(), torch.Size((2, 2))) def test_laplace_shape_scalar_params(self): laplace = Laplace(0, 1) self.assertEqual(laplace._batch_shape, torch.Size()) self.assertEqual(laplace._event_shape, torch.Size()) self.assertEqual(laplace.sample().size(), torch.Size()) self.assertEqual(laplace.sample((3, 2)).size(), torch.Size((3, 2))) self.assertRaises(ValueError, laplace.log_prob, self.scalar_sample) self.assertEqual( laplace.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertEqual( laplace.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)) ) def test_laplace_shape_tensor_params(self): laplace = Laplace(torch.tensor([0.0, 0.0]), torch.tensor([1.0, 1.0])) self.assertEqual(laplace._batch_shape, torch.Size((2,))) self.assertEqual(laplace._event_shape, torch.Size(())) self.assertEqual(laplace.sample().size(), torch.Size((2,))) self.assertEqual(laplace.sample((3, 2)).size(), torch.Size((3, 2, 2))) self.assertEqual( laplace.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)) ) self.assertRaises(ValueError, laplace.log_prob, self.tensor_sample_2) self.assertEqual(laplace.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2))) def test_continuous_bernoulli_shape_scalar_params(self): continuous_bernoulli = ContinuousBernoulli(0.3) self.assertEqual(continuous_bernoulli._batch_shape, torch.Size()) self.assertEqual(continuous_bernoulli._event_shape, torch.Size()) self.assertEqual(continuous_bernoulli.sample().size(), torch.Size()) self.assertEqual(continuous_bernoulli.sample((3, 
2)).size(), torch.Size((3, 2))) self.assertRaises(ValueError, continuous_bernoulli.log_prob, self.scalar_sample) self.assertEqual( continuous_bernoulli.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)), ) self.assertEqual( continuous_bernoulli.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)), ) def test_continuous_bernoulli_shape_tensor_params(self): continuous_bernoulli = ContinuousBernoulli( torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]]) ) self.assertEqual(continuous_bernoulli._batch_shape, torch.Size((3, 2))) self.assertEqual(continuous_bernoulli._event_shape, torch.Size(())) self.assertEqual(continuous_bernoulli.sample().size(), torch.Size((3, 2))) self.assertEqual( continuous_bernoulli.sample((3, 2)).size(), torch.Size((3, 2, 3, 2)) ) self.assertEqual( continuous_bernoulli.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)), ) self.assertRaises( ValueError, continuous_bernoulli.log_prob, self.tensor_sample_2 ) self.assertEqual( continuous_bernoulli.log_prob(torch.ones(3, 1, 1)).size(), torch.Size((3, 3, 2)), ) @skipIfTorchDynamo("Not a TorchDynamo suitable test") def test_mixture_same_family_mean_shape(self): mix_distribution = Categorical(torch.ones([3, 1, 3])) component_distribution = Normal(torch.zeros([3, 3, 3]), torch.ones([3, 3, 3])) gmm = MixtureSameFamily(mix_distribution, component_distribution) self.assertEqual(len(gmm.mean.shape), 2) @skipIfTorchDynamo("Not a TorchDynamo suitable test")
TestDistributionShapes
python
pytorch__pytorch
test/test_multiprocessing_spawn.py
{ "start": 6917, "end": 7730 }
class ____(TestCase, _TestMultiProcessing): start_method = 'spawn' def test_exception_raises(self): with self.assertRaises(mp.ProcessRaisedException): mp.spawn(_test_success_first_then_exception_func, args=(), nprocs=1) def test_signal_raises(self): context = mp.spawn(_test_infinite_task, args=(), nprocs=1, join=False) for pid in context.pids(): os.kill(pid, signal.SIGTERM) with self.assertRaises(mp.ProcessExitedException): context.join() def _test_process_exited(self): with self.assertRaises(mp.ProcessExitedException) as e: mp.spawn(_test_process_exit, args=(), nprocs=1) self.assertEqual(12, e.exit_code) @unittest.skipIf( IS_WINDOWS, "Fork is only available on Unix", )
SpawnTest
python
pandas-dev__pandas
pandas/tests/reshape/merge/test_multi.py
{ "start": 26969, "end": 31000 }
class ____: def test_join_multi_multi(self, left_multi, right_multi, join_type, on_cols_multi): left_names = left_multi.index.names right_names = right_multi.index.names if join_type == "right": level_order = right_names + left_names.difference(right_names) else: level_order = left_names + right_names.difference(left_names) # Multi-index join tests expected = ( merge( left_multi.reset_index(), right_multi.reset_index(), how=join_type, on=on_cols_multi, ) .set_index(level_order) .sort_index() ) result = left_multi.join(right_multi, how=join_type).sort_index() tm.assert_frame_equal(result, expected) def test_join_multi_empty_frames( self, left_multi, right_multi, join_type, on_cols_multi ): left_multi = left_multi.drop(columns=left_multi.columns) right_multi = right_multi.drop(columns=right_multi.columns) left_names = left_multi.index.names right_names = right_multi.index.names if join_type == "right": level_order = right_names + left_names.difference(right_names) else: level_order = left_names + right_names.difference(left_names) expected = ( merge( left_multi.reset_index(), right_multi.reset_index(), how=join_type, on=on_cols_multi, ) .set_index(level_order) .sort_index() ) result = left_multi.join(right_multi, how=join_type).sort_index() tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("box", [None, np.asarray, Series, Index]) def test_merge_datetime_index(self, box): # see gh-19038 df = DataFrame( [1, 2, 3], ["2016-01-01", "2017-01-01", "2018-01-01"], columns=["a"] ) df.index = pd.to_datetime(df.index) on_vector = df.index.year if box is not None: on_vector = box(on_vector) exp_years = np.array([2016, 2017, 2018], dtype=np.int32) expected = DataFrame({"a": [1, 2, 3], "key_1": exp_years}) result = df.merge(df, on=["a", on_vector], how="inner") tm.assert_frame_equal(result, expected) expected = DataFrame({"key_0": exp_years, "a_x": [1, 2, 3], "a_y": [1, 2, 3]}) result = df.merge(df, on=[df.index.year], how="inner") tm.assert_frame_equal(result, 
expected) def test_single_common_level(self): index_left = MultiIndex.from_tuples( [("K0", "X0"), ("K0", "X1"), ("K1", "X2")], names=["key", "X"] ) left = DataFrame( {"A": ["A0", "A1", "A2"], "B": ["B0", "B1", "B2"]}, index=index_left ) index_right = MultiIndex.from_tuples( [("K0", "Y0"), ("K1", "Y1"), ("K2", "Y2"), ("K2", "Y3")], names=["key", "Y"] ) right = DataFrame( {"C": ["C0", "C1", "C2", "C3"], "D": ["D0", "D1", "D2", "D3"]}, index=index_right, ) result = left.join(right) expected = merge( left.reset_index(), right.reset_index(), on=["key"], how="inner" ).set_index(["key", "X", "Y"]) tm.assert_frame_equal(result, expected) def test_join_multi_wrong_order(self): # GH 25760 # GH 28956 midx1 = MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"]) midx3 = MultiIndex.from_tuples([(4, 1), (3, 2), (3, 1)], names=["b", "a"]) left = DataFrame(index=midx1, data={"x": [10, 20, 30, 40]}) right = DataFrame(index=midx3, data={"y": ["foo", "bar", "fing"]}) result = left.join(right) expected = DataFrame( index=midx1, data={"x": [10, 20, 30, 40], "y": ["fing", "foo", "bar", np.nan]}, ) tm.assert_frame_equal(result, expected)
TestJoinMultiMulti
python
ray-project__ray
release/ray_release/tests/test_cluster_manager.py
{ "start": 1229, "end": 1325 }
class ____(Test): def get_anyscale_byod_image(self) -> str: return "anyscale"
MockTest
python
kubernetes-client__python
kubernetes/client/api_client.py
{ "start": 765, "end": 25590 }
class ____(object): """Generic API client for OpenAPI client library builds. OpenAPI generic API client. This client handles the client- server communication, and is invariant across implementations. Specifics of the methods and models for each application are generated from the OpenAPI templates. NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. :param configuration: .Configuration object for this client :param header_name: a header to pass when making calls to the API. :param header_value: a header value to pass when making calls to the API. :param cookie: a cookie to include in the header when making calls to the API :param pool_threads: The number of threads to use for async requests to the API. More threads means more concurrent API requests. """ PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types NATIVE_TYPES_MAPPING = { 'int': int, 'long': int if six.PY3 else long, # noqa: F821 'float': float, 'str': str, 'bool': bool, 'date': datetime.date, 'datetime': datetime.datetime, 'object': object, } _pool = None def __init__(self, configuration=None, header_name=None, header_value=None, cookie=None, pool_threads=1): if configuration is None: configuration = Configuration.get_default_copy() self.configuration = configuration self.pool_threads = pool_threads self.rest_client = rest.RESTClientObject(configuration) self.default_headers = {} if header_name is not None: self.default_headers[header_name] = header_value self.cookie = cookie # Set default User-Agent. 
self.user_agent = 'OpenAPI-Generator/34.0.0+snapshot/python' self.client_side_validation = configuration.client_side_validation def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def close(self): if self._pool: self._pool.close() self._pool.join() self._pool = None if hasattr(atexit, 'unregister'): atexit.unregister(self.close) @property def pool(self): """Create thread pool on first request avoids instantiating unused threadpool for blocking clients. """ if self._pool is None: atexit.register(self.close) self._pool = ThreadPool(self.pool_threads) return self._pool @property def user_agent(self): """User agent for this API client""" return self.default_headers['User-Agent'] @user_agent.setter def user_agent(self, value): self.default_headers['User-Agent'] = value def set_default_header(self, header_name, header_value): self.default_headers[header_name] = header_value def __call_api( self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None, post_params=None, files=None, response_type=None, auth_settings=None, _return_http_data_only=None, collection_formats=None, _preload_content=True, _request_timeout=None, _host=None): config = self.configuration # header parameters header_params = header_params or {} header_params.update(self.default_headers) if self.cookie: header_params['Cookie'] = self.cookie if header_params: header_params = self.sanitize_for_serialization(header_params) header_params = dict(self.parameters_to_tuples(header_params, collection_formats)) # path parameters if path_params: path_params = self.sanitize_for_serialization(path_params) path_params = self.parameters_to_tuples(path_params, collection_formats) for k, v in path_params: # specified safe chars, encode everything resource_path = resource_path.replace( '{%s}' % k, quote(str(v), safe=config.safe_chars_for_path_param) ) # query parameters if query_params: query_params = 
self.sanitize_for_serialization(query_params) query_params = self.parameters_to_tuples(query_params, collection_formats) # post parameters if post_params or files: post_params = post_params if post_params else [] post_params = self.sanitize_for_serialization(post_params) post_params = self.parameters_to_tuples(post_params, collection_formats) post_params.extend(self.files_parameters(files)) # auth setting self.update_params_for_auth(header_params, query_params, auth_settings) # body if body: body = self.sanitize_for_serialization(body) # request url if _host is None: url = self.configuration.host + resource_path else: # use server/host defined in path or operation instead url = _host + resource_path # perform request and return response response_data = self.request( method, url, query_params=query_params, headers=header_params, post_params=post_params, body=body, _preload_content=_preload_content, _request_timeout=_request_timeout) self.last_response = response_data return_data = response_data if _preload_content: # deserialize response data if response_type: return_data = self.deserialize(response_data, response_type) else: return_data = None if _return_http_data_only: return (return_data) else: return (return_data, response_data.status, response_data.getheaders()) def sanitize_for_serialization(self, obj): """Builds a JSON POST object. If obj is None, return None. If obj is str, int, long, float, bool, return directly. If obj is datetime.datetime, datetime.date convert to string in iso8601 format. If obj is list, sanitize each element in the list. If obj is dict, return the dict. If obj is OpenAPI model, return the properties dict. :param obj: The data to serialize. :return: The serialized form of data. 
""" if obj is None: return None elif isinstance(obj, self.PRIMITIVE_TYPES): return obj elif isinstance(obj, list): return [self.sanitize_for_serialization(sub_obj) for sub_obj in obj] elif isinstance(obj, tuple): return tuple(self.sanitize_for_serialization(sub_obj) for sub_obj in obj) elif isinstance(obj, (datetime.datetime, datetime.date)): return obj.isoformat() if isinstance(obj, dict): obj_dict = obj else: # Convert model obj to dict except # attributes `openapi_types`, `attribute_map` # and attributes which value is not None. # Convert attribute name to json key in # model definition for request. obj_dict = {obj.attribute_map[attr]: getattr(obj, attr) for attr, _ in six.iteritems(obj.openapi_types) if getattr(obj, attr) is not None} return {key: self.sanitize_for_serialization(val) for key, val in six.iteritems(obj_dict)} def deserialize(self, response, response_type): """Deserializes response into an object. :param response: RESTResponse object to be deserialized. :param response_type: class literal for deserialized object, or string of class name. :return: deserialized object. """ # handle file downloading # save response body into a tmp file and return the instance if response_type == "file": return self.__deserialize_file(response) # fetch data from response object try: data = json.loads(response.data) except ValueError: data = response.data return self.__deserialize(data, response_type) def __deserialize(self, data, klass): """Deserializes dict, list, str into an object. :param data: dict, list or str. :param klass: class literal, or string of class name. :return: object. 
""" if data is None: return None if type(klass) == str: if klass.startswith('list['): sub_kls = re.match(r'list\[(.*)\]', klass).group(1) return [self.__deserialize(sub_data, sub_kls) for sub_data in data] if klass.startswith('dict('): sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2) return {k: self.__deserialize(v, sub_kls) for k, v in six.iteritems(data)} # convert str to class if klass in self.NATIVE_TYPES_MAPPING: klass = self.NATIVE_TYPES_MAPPING[klass] else: klass = getattr(kubernetes.client.models, klass) if klass in self.PRIMITIVE_TYPES: return self.__deserialize_primitive(data, klass) elif klass == object: return self.__deserialize_object(data) elif klass == datetime.date: return self.__deserialize_date(data) elif klass == datetime.datetime: return self.__deserialize_datetime(data) else: return self.__deserialize_model(data, klass) def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None, post_params=None, files=None, response_type=None, auth_settings=None, async_req=None, _return_http_data_only=None, collection_formats=None, _preload_content=True, _request_timeout=None, _host=None): """Makes the HTTP request (synchronous) and returns deserialized data. To make an async_req request, set the async_req parameter. :param resource_path: Path to method endpoint. :param method: Method to call. :param path_params: Path parameters in the url. :param query_params: Query parameters in the url. :param header_params: Header parameters to be placed in the request header. :param body: Request body. :param post_params dict: Request post form parameters, for `application/x-www-form-urlencoded`, `multipart/form-data`. :param auth_settings list: Auth Settings names for the request. :param response: Response data type. :param files dict: key -> filename, value -> filepath, for `multipart/form-data`. 
:param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param collection_formats: dict of collection formats for path, query, header, and post parameters. :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: If async_req parameter is True, the request will be called asynchronously. The method will return the request thread. If parameter async_req is False or missing, then the method will return the response directly. """ if not async_req: return self.__call_api(resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout, _host) return self.pool.apply_async(self.__call_api, (resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout, _host)) def request(self, method, url, query_params=None, headers=None, post_params=None, body=None, _preload_content=True, _request_timeout=None): """Makes the HTTP request using RESTClient.""" if method == "GET": return self.rest_client.GET(url, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, headers=headers) elif method == "HEAD": return self.rest_client.HEAD(url, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, headers=headers) elif method == "OPTIONS": return self.rest_client.OPTIONS(url, query_params=query_params, headers=headers, _preload_content=_preload_content, _request_timeout=_request_timeout) elif 
method == "POST": return self.rest_client.POST(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "PUT": return self.rest_client.PUT(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "PATCH": return self.rest_client.PATCH(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "DELETE": return self.rest_client.DELETE(url, query_params=query_params, headers=headers, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) else: raise ApiValueError( "http method must be `GET`, `HEAD`, `OPTIONS`," " `POST`, `PATCH`, `PUT` or `DELETE`." ) def parameters_to_tuples(self, params, collection_formats): """Get parameters as list of tuples, formatting collections. :param params: Parameters as dict or list of two-tuples :param dict collection_formats: Parameter collection formats :return: Parameters as list of tuples, collections formatted """ new_params = [] if collection_formats is None: collection_formats = {} for k, v in six.iteritems(params) if isinstance(params, dict) else params: # noqa: E501 if k in collection_formats: collection_format = collection_formats[k] if collection_format == 'multi': new_params.extend((k, value) for value in v) else: if collection_format == 'ssv': delimiter = ' ' elif collection_format == 'tsv': delimiter = '\t' elif collection_format == 'pipes': delimiter = '|' else: # csv is the default delimiter = ',' new_params.append( (k, delimiter.join(str(value) for value in v))) else: new_params.append((k, v)) return new_params def files_parameters(self, files=None): """Builds form parameters. :param files: File parameters. :return: Form parameters with files. 
""" params = [] if files: for k, v in six.iteritems(files): if not v: continue file_names = v if type(v) is list else [v] for n in file_names: with open(n, 'rb') as f: filename = os.path.basename(f.name) filedata = f.read() mimetype = (mimetypes.guess_type(filename)[0] or 'application/octet-stream') params.append( tuple([k, tuple([filename, filedata, mimetype])])) return params def select_header_accept(self, accepts): """Returns `Accept` based on an array of accepts provided. :param accepts: List of headers. :return: Accept (e.g. application/json). """ if not accepts: return accepts = [x.lower() for x in accepts] if 'application/json' in accepts: return 'application/json' else: return ', '.join(accepts) def select_header_content_type(self, content_types): """Returns `Content-Type` based on an array of content_types provided. :param content_types: List of content-types. :return: Content-Type (e.g. application/json). """ if not content_types: return 'application/json' content_types = [x.lower() for x in content_types] if 'application/json' in content_types or '*/*' in content_types: return 'application/json' else: return content_types[0] def update_params_for_auth(self, headers, querys, auth_settings): """Updates header and query params based on authentication setting. :param headers: Header parameters dict to be updated. :param querys: Query parameters tuple list to be updated. :param auth_settings: Authentication setting identifiers list. 
""" if not auth_settings: return for auth in auth_settings: auth_setting = self.configuration.auth_settings().get(auth) if auth_setting: if auth_setting['in'] == 'cookie': headers['Cookie'] = auth_setting['value'] elif auth_setting['in'] == 'header': headers[auth_setting['key']] = auth_setting['value'] elif auth_setting['in'] == 'query': querys.append((auth_setting['key'], auth_setting['value'])) else: raise ApiValueError( 'Authentication token must be in `query` or `header`' ) def __deserialize_file(self, response): """Deserializes body to file Saves response body into a file in a temporary folder, using the filename from the `Content-Disposition` header if provided. :param response: RESTResponse. :return: file path. """ fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path) os.close(fd) os.remove(path) content_disposition = response.getheader("Content-Disposition") if content_disposition: filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?', content_disposition).group(1) path = os.path.join(os.path.dirname(path), filename) with open(path, "wb") as f: f.write(response.data) return path def __deserialize_primitive(self, data, klass): """Deserializes string to primitive type. :param data: str. :param klass: class literal. :return: int, long, float, str, bool. """ try: return klass(data) except UnicodeEncodeError: return six.text_type(data) except TypeError: return data def __deserialize_object(self, value): """Return an original value. :return: object. """ return value def __deserialize_date(self, string): """Deserializes string to date. :param string: str. :return: date. """ try: return parse(string).date() except ImportError: return string except ValueError: raise rest.ApiException( status=0, reason="Failed to parse `{0}` as date object".format(string) ) def __deserialize_datetime(self, string): """Deserializes string to datetime. The string should be in iso8601 datetime format. :param string: str. :return: datetime. 
""" try: return parse(string) except ImportError: return string except ValueError: raise rest.ApiException( status=0, reason=( "Failed to parse `{0}` as datetime object" .format(string) ) ) def __deserialize_model(self, data, klass): """Deserializes list or dict to model. :param data: dict, list. :param klass: class literal. :return: model object. """ if not klass.openapi_types and not hasattr(klass, 'get_real_child_model'): return data kwargs = {} if (data is not None and klass.openapi_types is not None and isinstance(data, (list, dict))): for attr, attr_type in six.iteritems(klass.openapi_types): if klass.attribute_map[attr] in data: value = data[klass.attribute_map[attr]] kwargs[attr] = self.__deserialize(value, attr_type) instance = klass(**kwargs) if hasattr(instance, 'get_real_child_model'): klass_name = instance.get_real_child_model(data) if klass_name: instance = self.__deserialize(data, klass_name) return instance
ApiClient
python
walkccc__LeetCode
solutions/1177. Can Make Palindrome from Substring/1177.py
{ "start": 0, "end": 331 }
class ____: def canMakePaliQueries(self, s: str, queries: list[list[int]]) -> list[bool]: dp = [0] * (len(s) + 1) for i in range(1, len(s) + 1): dp[i] = dp[i - 1] ^ 1 << ord(s[i - 1]) - ord('a') return [ (dp[right + 1] ^ dp[left]).bit_count() // 2 <= k for left, right, k in queries ]
Solution
python
kamyu104__LeetCode-Solutions
Python/find-products-of-elements-of-big-array.py
{ "start": 102, "end": 1554 }
class ____(object): def findProductsOfElements(self, queries): """ :type queries: List[List[int]] :rtype: List[int] """ def binary_search(left, right, check): while left <= right: mid = left + ((right-left)>>1) if check(mid): right = mid-1 else: left = mid+1 return left def f(x): def count1(x): result = i = 0 while 1<<i <= x: mask = (1<<(i+1))-1 result += ((x&~mask)>>1)+max((x&mask)-(1<<i)+1, 0) i += 1 return result def count2(x): result = i = 0 while 1<<i <= x: mask = (1<<(i+1))-1 result += (((x&~mask)>>1)+max((x&mask)-(1<<i)+1, 0))*i i += 1 return result y = binary_search(1, x-1, lambda i: count1(i) >= x) result = count2(y-1) x -= count1(y-1) i = 0 while 1<<i <= y: if y&(1<<i): result += i x -= 1 if x == 0: break i += 1 return result return [pow(2, f(right+1)-f(left), mod) for left, right, mod in queries]
Solution
python
tensorflow__tensorflow
tensorflow/python/trackable/constants.py
{ "start": 1282, "end": 1370 }
class ____(str, enum.Enum): SAVEDMODEL = "savedmodel" CHECKPOINT = "checkpoint"
SaveType
python
PrefectHQ__prefect
src/prefect/server/utilities/messaging/__init__.py
{ "start": 1945, "end": 2020 }
class ____: data: bytes attributes: Mapping[str, str]
CapturedMessage
python
getsentry__sentry
src/sentry/hybridcloud/services/control_organization_provisioning/impl.py
{ "start": 1366, "end": 2374 }
class ____(Exception): pass def create_post_provision_outbox( provisioning_options: OrganizationProvisioningOptions, org_id: int ) -> RegionOutbox: return RegionOutbox( shard_scope=OutboxScope.ORGANIZATION_SCOPE, shard_identifier=org_id, category=OutboxCategory.POST_ORGANIZATION_PROVISION, object_identifier=org_id, payload=provisioning_options.post_provision_options.dict(), ) def create_organization_provisioning_outbox( organization_id: int, region_name: str, org_provision_payload: OrganizationProvisioningOptions | None, ) -> ControlOutbox: payload = org_provision_payload.dict() if org_provision_payload is not None else None return ControlOutbox( region_name=region_name, shard_scope=OutboxScope.PROVISION_SCOPE, category=OutboxCategory.PROVISION_ORGANIZATION, shard_identifier=organization_id, object_identifier=organization_id, payload=payload, )
SlugMismatchException
python
google__pytype
pytype/tests/test_reingest2.py
{ "start": 1274, "end": 2215 }
class ____(test_base.BaseTest): """Python 3 tests for reloading the pyi we generate.""" def test_instantiate_pyi_class(self): foo = """ import abc class Foo(metaclass=abc.ABCMeta): @abc.abstractmethod def foo(self): pass class Bar(Foo): def foo(self): pass """ with self.DepTree([("foo.py", foo)]): errors = self.CheckWithErrors(""" import foo foo.Foo() # not-instantiable[e] foo.Bar() """) self.assertErrorRegexes(errors, {"e": r"foo\.Foo.*foo"}) def test_use_class_attribute_from_annotated_new(self): foo = """ class Foo: def __new__(cls) -> "Foo": return cls() class Bar: FOO = Foo() """ with self.DepTree([("foo.py", foo)]): self.Check(""" import foo print(foo.Bar.FOO) """) if __name__ == "__main__": test_base.main()
ReingestTestPy3
python
h5py__h5py
examples/threading_example.py
{ "start": 1744, "end": 3667 }
class ____(threading.Thread): """ Computes a slice of the Mandelbrot set, and saves it to the HDF5 file. """ def __init__(self, f, shape, escape, startcoords, extent, eventcall): """ Set up a computation thread. f: HDF5 File object shape: 2-tuple (NX, NY) escape: Integer giving max iterations to escape start: Complex number giving initial location on the plane extent: Complex number giving calculation extent on the plane """ self.f = f self.shape = shape self.escape = escape self.startcoords = startcoords self.extent = extent self.eventcall = eventcall threading.Thread.__init__(self) def run(self): """ Perform computations and record the result to file """ nx, ny = self.shape arr = np.ndarray((nx,ny), dtype='i') xincr = self.extent.real/nx yincr = self.extent.imag/ny def compute_escape(pos, escape): """ Compute the number of steps required to escape """ z = 0+0j; for i in range(escape): z = z**2 + pos if abs(z) > 2: break return i for x in range(nx): if x%25 == 0: print("Computing row %d" % x) for y in range(ny): pos = self.startcoords + complex(x*xincr, y*yincr) arr[x,y] = compute_escape(pos, self.escape) with file_lock: dsname = "slice%03d" % len(self.f) dset = self.f.create_dataset(dsname, (nx, ny), 'i') dset.attrs['shape'] = self.shape dset.attrs['start'] = self.startcoords dset.attrs['extent'] = self.extent dset.attrs['escape'] = self.escape dset[...] = arr print("Calculation for %s done" % dsname) self.eventcall()
ComputeThread