language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
streamlit__streamlit
lib/streamlit/elements/widgets/chat.py
{ "start": 3083, "end": 7169 }
class ____(MutableMapping[str, Any]): """Represents the value returned by `st.chat_input` after user interaction. This dataclass contains the user's input text, any files uploaded, and optionally an audio recording. It provides a dict-like interface for accessing and modifying its attributes. Attributes ---------- text : str The text input provided by the user. files : list[UploadedFile] A list of files uploaded by the user. Only present when accept_file=True. audio : UploadedFile or None, optional An audio recording uploaded by the user, if any. Only present when accept_audio=True. Notes ----- - Supports dict-like access via `__getitem__`, `__setitem__`, and `__delitem__`. - Use `to_dict()` to convert the value to a standard dictionary. - The 'files' key is only present when accept_file=True. - The 'audio' key is only present when accept_audio=True. """ text: str files: list[UploadedFile] = field(default_factory=list) audio: UploadedFile | None = None _include_files: bool = field(default=False, repr=False, compare=False) _include_audio: bool = field(default=False, repr=False, compare=False) _included_keys: tuple[str, ...] 
= field(init=False, repr=False, compare=False) def __post_init__(self) -> None: """Compute and cache the included keys after initialization.""" keys: list[str] = ["text"] if self._include_files: keys.append("files") if self._include_audio: keys.append("audio") object.__setattr__(self, "_included_keys", tuple(keys)) def _get_included_keys(self) -> tuple[str, ...]: """Return tuple of keys that should be exposed based on inclusion flags.""" return self._included_keys def __len__(self) -> int: return len(self._get_included_keys()) def __iter__(self) -> Iterator[str]: return iter(self._get_included_keys()) def __contains__(self, key: object) -> bool: if not isinstance(key, str): return False return key in self._get_included_keys() def __getitem__(self, item: str) -> str | list[UploadedFile] | UploadedFile | None: if item not in self._get_included_keys(): raise KeyError(f"Invalid key: {item}") try: return getattr(self, item) # type: ignore[no-any-return] except AttributeError: raise KeyError(f"Invalid key: {item}") from None def __getattribute__(self, name: str) -> Any: # Intercept access to files/audio when they're excluded # Use object.__getattribute__ to avoid infinite recursion if name == "files" and not object.__getattribute__(self, "_include_files"): raise AttributeError( "'ChatInputValue' object has no attribute 'files' (accept_file=False)" ) if name == "audio" and not object.__getattribute__(self, "_include_audio"): raise AttributeError( "'ChatInputValue' object has no attribute 'audio' (accept_audio=False)" ) # For all other attributes, use normal lookup return object.__getattribute__(self, name) def __setitem__(self, key: str, value: Any) -> None: if key not in self._get_included_keys(): raise KeyError(f"Invalid key: {key}") setattr(self, key, value) def __delitem__(self, key: str) -> None: if key not in self._get_included_keys(): raise KeyError(f"Invalid key: {key}") try: delattr(self, key) except AttributeError: raise KeyError(f"Invalid key: {key}") from None 
def to_dict(self) -> dict[str, str | list[UploadedFile] | UploadedFile | None]: result: dict[str, str | list[UploadedFile] | UploadedFile | None] = { "text": self.text } if self._include_files: result["files"] = self.files if self._include_audio: result["audio"] = self.audio return result
ChatInputValue
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/operators/test_bedrock.py
{ "start": 14124, "end": 15161 }
class ____: DATA_SOURCE_ID = "data_source_id" @pytest.fixture def mock_conn(self) -> Generator[BaseAwsConnection, None, None]: with mock.patch.object(BedrockAgentHook, "conn") as _conn: _conn.create_data_source.return_value = {"dataSource": {"dataSourceId": self.DATA_SOURCE_ID}} yield _conn @pytest.fixture def bedrock_hook(self) -> Generator[BedrockAgentHook, None, None]: with mock_aws(): hook = BedrockAgentHook() yield hook def setup_method(self): self.operator = BedrockCreateDataSourceOperator( task_id="create_data_source", name=self.DATA_SOURCE_ID, knowledge_base_id="test_knowledge_base_id", bucket_name="test_bucket", ) def test_id_returned(self, mock_conn): result = self.operator.execute({}) assert result == self.DATA_SOURCE_ID def test_template_fields(self): validate_template_fields(self.operator)
TestBedrockCreateDataSourceOperator
python
miyuchina__mistletoe
mistletoe/block_token.py
{ "start": 6605, "end": 7366 }
class ____(BlockToken): """ Setext heading token. This is a leaf block token. Its children are inline (span) tokens. Not included in the parsing process, but called by Paragraph.__new__. Attributes: level (int): heading level. """ repr_attributes = BlockToken.repr_attributes + ("level",) def __init__(self, lines): self.underline = lines.pop().rstrip() self.level = 1 if self.underline.endswith('=') else 2 content = '\n'.join([line.strip() for line in lines]) super().__init__(content, span_token.tokenize_inner) @classmethod def start(cls, line): raise NotImplementedError() @classmethod def read(cls, lines): raise NotImplementedError()
SetextHeading
python
fastai__fastai
fastai/metrics.py
{ "start": 19198, "end": 19661 }
class ____(DiceMulti): "Averaged Jaccard coefficient metric (mIoU) for multiclass target in segmentation" @property def value(self): binary_jaccard_scores = np.array([]) for c in self.inter: binary_jaccard_scores = np.append(binary_jaccard_scores, self.inter[c]/(self.union[c]-self.inter[c]) if self.union[c] > 0 else np.nan) return np.nanmean(binary_jaccard_scores) # %% ../nbs/13b_metrics.ipynb 124
JaccardCoeffMulti
python
kubernetes-client__python
kubernetes/client/models/v1_seccomp_profile.py
{ "start": 383, "end": 5526 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'localhost_profile': 'str', 'type': 'str' } attribute_map = { 'localhost_profile': 'localhostProfile', 'type': 'type' } def __init__(self, localhost_profile=None, type=None, local_vars_configuration=None): # noqa: E501 """V1SeccompProfile - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._localhost_profile = None self._type = None self.discriminator = None if localhost_profile is not None: self.localhost_profile = localhost_profile self.type = type @property def localhost_profile(self): """Gets the localhost_profile of this V1SeccompProfile. # noqa: E501 localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type. # noqa: E501 :return: The localhost_profile of this V1SeccompProfile. # noqa: E501 :rtype: str """ return self._localhost_profile @localhost_profile.setter def localhost_profile(self, localhost_profile): """Sets the localhost_profile of this V1SeccompProfile. localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type. 
# noqa: E501 :param localhost_profile: The localhost_profile of this V1SeccompProfile. # noqa: E501 :type: str """ self._localhost_profile = localhost_profile @property def type(self): """Gets the type of this V1SeccompProfile. # noqa: E501 type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. # noqa: E501 :return: The type of this V1SeccompProfile. # noqa: E501 :rtype: str """ return self._type @type.setter def type(self, type): """Sets the type of this V1SeccompProfile. type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. # noqa: E501 :param type: The type of this V1SeccompProfile. 
# noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501 raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501 self._type = type def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1SeccompProfile): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1SeccompProfile): return True return self.to_dict() != other.to_dict()
V1SeccompProfile
python
run-llama__llama_index
llama-index-core/llama_index/core/output_parsers/pydantic.py
{ "start": 400, "end": 2119 }
class ____(BaseOutputParser, Generic[Model]): """ Pydantic Output Parser. Args: output_cls (BaseModel): Pydantic output class. """ def __init__( self, output_cls: Type[Model], excluded_schema_keys_from_format: Optional[List] = None, pydantic_format_tmpl: str = PYDANTIC_FORMAT_TMPL, ) -> None: """Init params.""" self._output_cls = output_cls self._excluded_schema_keys_from_format = excluded_schema_keys_from_format or [] self._pydantic_format_tmpl = pydantic_format_tmpl @property def output_cls(self) -> Type[Model]: return self._output_cls @property def format_string(self) -> str: """Format string.""" return self.get_format_string(escape_json=True) def get_format_string(self, escape_json: bool = True) -> str: """Format string.""" schema_dict = self._output_cls.model_json_schema() for key in self._excluded_schema_keys_from_format: del schema_dict[key] schema_str = json.dumps(schema_dict) output_str = self._pydantic_format_tmpl.format(schema=schema_str) if escape_json: return output_str.replace("{", "{{").replace("}", "}}") else: return output_str def parse(self, text: str) -> Any: """Parse, validate, and correct errors programmatically.""" json_str = extract_json_str(text) return self._output_cls.model_validate_json(json_str) def format(self, query: str) -> str: """Format a query with structured output formatting instructions.""" return query + "\n\n" + self.get_format_string(escape_json=True)
PydanticOutputParser
python
gevent__gevent
src/gevent/events.py
{ "start": 12672, "end": 13258 }
class ____(GeventDidPatchEvent): """ Implementation of `IGeventDidPatchModuleEvent`. """ #: The name of the setuptools entry point that is called when this #: event is emitted. ENTRY_POINT_NAME = 'gevent.plugins.monkey.did_patch_module' def __init__(self, module_name, source, target): super(GeventDidPatchModuleEvent, self).__init__(source, target) self.module_name = module_name # TODO: Maybe it would be useful for the the module patch events # to have an attribute telling if they're being done during patch_all?
GeventDidPatchModuleEvent
python
ansible__ansible
test/units/module_utils/facts/test_sysctl.py
{ "start": 3281, "end": 9042 }
class ____(unittest.TestCase): def test_get_sysctl_missing_binary(self): module = MagicMock() module.get_bin_path.return_value = '/usr/sbin/sysctl' module.run_command.side_effect = ValueError self.assertRaises(ValueError, get_sysctl, module, ['vm']) def test_get_sysctl_nonzero_rc(self): module = MagicMock() module.get_bin_path.return_value = '/usr/sbin/sysctl' module.run_command.return_value = (1, '', '') sysctl = get_sysctl(module, ['hw']) self.assertEqual(sysctl, {}) def test_get_sysctl_command_error(self): module = MagicMock() module.get_bin_path.return_value = '/usr/sbin/sysctl' module.reset_mock() module.run_command.side_effect = OSError('foo') sysctl = get_sysctl(module, ['hw']) for call in module.error_as_warning.call_args_list: self.assertIn('Unable to read sysctl', call[0][0]) self.assertEqual(sysctl, {}) def test_get_sysctl_all_invalid_output(self): module = MagicMock() module.get_bin_path.return_value = '/sbin/sysctl' module.run_command.return_value = (0, BAD_SYSCTL, '') sysctl = get_sysctl(module, ['hw']) module.run_command.assert_called_once_with(['/sbin/sysctl', 'hw']) lines = [l for l in BAD_SYSCTL.splitlines() if l] for call in module.error_as_warning.call_args_list: self.assertIn('Unable to split sysctl line', call[0][0]) self.assertEqual(module.error_as_warning.call_count, len(lines)) self.assertEqual(sysctl, {}) def test_get_sysctl_mixed_invalid_output(self): module = MagicMock() module.get_bin_path.return_value = '/sbin/sysctl' module.run_command.return_value = (0, GOOD_BAD_SYSCTL, '') sysctl = get_sysctl(module, ['hw']) module.run_command.assert_called_once_with(['/sbin/sysctl', 'hw']) bad_lines = ['bad.output.here', 'and.bad.output.here'] for call in module.error_as_warning.call_args_list: self.assertIn('Unable to split sysctl line', call[0][0]) self.assertEqual(module.error_as_warning.call_count, 2) self.assertEqual(sysctl, {'hw.smt': '0'}) def test_get_sysctl_openbsd_hw(self): expected_lines = [l for l in OPENBSD_SYSCTL_HW.splitlines() if l] 
module = MagicMock() module.get_bin_path.return_value = '/sbin/sysctl' module.run_command.return_value = (0, OPENBSD_SYSCTL_HW, '') sysctl = get_sysctl(module, ['hw']) module.run_command.assert_called_once_with(['/sbin/sysctl', 'hw']) self.assertEqual(len(sysctl), len(expected_lines)) self.assertEqual(sysctl['hw.machine'], 'amd64') # first line self.assertEqual(sysctl['hw.smt'], '0') # random line self.assertEqual(sysctl['hw.ncpuonline'], '1') # last line # weird chars in value self.assertEqual( sysctl['hw.disknames'], 'cd0:,sd0:9e1bd96cb20ab429,fd0:') # more symbols/spaces in value self.assertEqual( sysctl['hw.product'], 'Standard PC (i440FX + PIIX, 1996)') def test_get_sysctl_openbsd_kern(self): module = MagicMock() module.get_bin_path.return_value = '/sbin/sysctl' module.run_command.return_value = (0, OPENBSD_SYSCTL_KERN_PARTIAL, '') sysctl = get_sysctl(module, ['kern']) module.run_command.assert_called_once_with(['/sbin/sysctl', 'kern']) self.assertEqual( len(sysctl), len( [l for l in OPENBSD_SYSCTL_KERN_PARTIAL.splitlines() if l.startswith('kern')])) self.assertEqual(sysctl['kern.ostype'], 'OpenBSD') # first line self.assertEqual(sysctl['kern.maxproc'], '1310') # random line self.assertEqual(sysctl['kern.posix1version'], '200809') # last line # multiline self.assertEqual( sysctl['kern.version'], 'OpenBSD 6.7 (GENERIC) #179: Thu May 7 11:02:37 MDT 2020\n ' 'deraadt@amd64.openbsd.org:/usr/src/sys/arch/amd64/compile/GENERIC') # more symbols/spaces in value self.assertEqual( sysctl['kern.clockrate'], 'tick = 10000, tickadj = 40, hz = 100, profhz = 100, stathz = 100') def test_get_sysctl_linux_vm(self): module = MagicMock() module.get_bin_path.return_value = '/usr/sbin/sysctl' module.run_command.return_value = (0, LINUX_SYSCTL_VM_PARTIAL, '') sysctl = get_sysctl(module, ['vm']) module.run_command.assert_called_once_with(['/usr/sbin/sysctl', 'vm']) self.assertEqual( len(sysctl), len([l for l in LINUX_SYSCTL_VM_PARTIAL.splitlines() if l])) 
self.assertEqual(sysctl['vm.dirty_background_ratio'], '10') self.assertEqual(sysctl['vm.laptop_mode'], '0') self.assertEqual(sysctl['vm.min_slab_ratio'], '5') # tabs self.assertEqual(sysctl['vm.lowmem_reserve_ratio'], '256\t256\t32\t0') def test_get_sysctl_macos_vm(self): module = MagicMock() module.get_bin_path.return_value = '/usr/sbin/sysctl' module.run_command.return_value = (0, MACOS_SYSCTL_VM_PARTIAL, '') sysctl = get_sysctl(module, ['vm']) module.run_command.assert_called_once_with(['/usr/sbin/sysctl', 'vm']) self.assertEqual( len(sysctl), len([l for l in MACOS_SYSCTL_VM_PARTIAL.splitlines() if l])) self.assertEqual(sysctl['vm.loadavg'], '{ 1.28 1.18 1.13 }') self.assertEqual( sysctl['vm.swapusage'], 'total = 2048.00M used = 1017.50M free = 1030.50M (encrypted)')
TestSysctlParsingInFacts
python
readthedocs__readthedocs.org
readthedocs/invitations/backends.py
{ "start": 3561, "end": 4825 }
class ____(Backend): """Team backend.""" klass = Team def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.organization = self.object.organization def get_origin_url(self): return reverse("organization_team_detail", args=[self.organization.slug, self.object.slug]) def get_object_url(self): return reverse("organization_team_detail", args=[self.organization.slug, self.object.slug]) def get_success_url(self): return reverse("organization_detail", args=[self.organization.slug]) def redeem(self, user): self.organization.add_member(user, self.object) return True def owns_object(self, user): return user in AdminPermission.owners(self.organization) def get_object_name(self): return f"{self.organization.slug} {self.object.slug}" def get_backend(invitation): """Get the proper backend for the invitation.""" backends = [ OrganizationBackend, ProjectBackend, TeamBackend, ] for backend in backends: if isinstance(invitation.object, backend.klass): return backend(invitation) raise ValueError(f"Backend not found for object of class {object.__class__}")
TeamBackend
python
GoogleCloudPlatform__python-docs-samples
compute/client_library/sgs.py
{ "start": 1696, "end": 2017 }
class ____: """ Represents a single import item in a script, created either by `import something as something_else` or `from module import something as something_else`. """ name: str asname: str def __hash__(self): return hash(f"{self.name} as {self.asname}") @dataclass
ImportItem
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/output/win32.py
{ "start": 17597, "end": 19181 }
class ____: BLACK = 0x0000 BLUE = 0x0010 GREEN = 0x0020 CYAN = 0x0030 RED = 0x0040 MAGENTA = 0x0050 YELLOW = 0x0060 GRAY = 0x0070 INTENSITY = 0x0080 # Background color is intensified. def _create_ansi_color_dict( color_cls: type[FOREGROUND_COLOR] | type[BACKGROUND_COLOR], ) -> dict[str, int]: "Create a table that maps the 16 named ansi colors to their Windows code." return { "ansidefault": color_cls.BLACK, "ansiblack": color_cls.BLACK, "ansigray": color_cls.GRAY, "ansibrightblack": color_cls.BLACK | color_cls.INTENSITY, "ansiwhite": color_cls.GRAY | color_cls.INTENSITY, # Low intensity. "ansired": color_cls.RED, "ansigreen": color_cls.GREEN, "ansiyellow": color_cls.YELLOW, "ansiblue": color_cls.BLUE, "ansimagenta": color_cls.MAGENTA, "ansicyan": color_cls.CYAN, # High intensity. "ansibrightred": color_cls.RED | color_cls.INTENSITY, "ansibrightgreen": color_cls.GREEN | color_cls.INTENSITY, "ansibrightyellow": color_cls.YELLOW | color_cls.INTENSITY, "ansibrightblue": color_cls.BLUE | color_cls.INTENSITY, "ansibrightmagenta": color_cls.MAGENTA | color_cls.INTENSITY, "ansibrightcyan": color_cls.CYAN | color_cls.INTENSITY, } FG_ANSI_COLORS = _create_ansi_color_dict(FOREGROUND_COLOR) BG_ANSI_COLORS = _create_ansi_color_dict(BACKGROUND_COLOR) assert set(FG_ANSI_COLORS) == set(ANSI_COLOR_NAMES) assert set(BG_ANSI_COLORS) == set(ANSI_COLOR_NAMES)
BACKGROUND_COLOR
python
run-llama__llama_index
llama-index-core/llama_index/core/indices/property_graph/sub_retrievers/base.py
{ "start": 531, "end": 6241 }
class ____(BaseRetriever): """ The base class for property graph retrievers. By default, will retrieve nodes from the graph store and add source text to the nodes if needed. Args: graph_store (PropertyGraphStore): The graph store to retrieve data from. include_text (bool, optional): Whether to include source text in the retrieved nodes. Defaults to True. include_text_preamble (Optional[str], optional): The preamble to include before the source text. Defaults to DEFAULT_PREAMBLE. """ def __init__( self, graph_store: PropertyGraphStore, include_text: bool = True, include_text_preamble: Optional[str] = DEFAULT_PREAMBLE, include_properties: bool = False, **kwargs: Any, ) -> None: self._graph_store = graph_store self.include_text = include_text self._include_text_preamble = include_text_preamble self.include_properties = include_properties super().__init__(callback_manager=kwargs.get("callback_manager")) def _get_nodes_with_score( self, triplets: List[Triplet], scores: Optional[List[float]] = None ) -> List[NodeWithScore]: results = [] for i, triplet in enumerate(triplets): source_id = triplet[0].properties.get(TRIPLET_SOURCE_KEY, None) relationships = {} if source_id is not None: relationships[NodeRelationship.SOURCE] = RelatedNodeInfo( node_id=source_id ) if self.include_properties: text = f"{triplet[0]!s} -> {triplet[1]!s} -> {triplet[2]!s}" else: text = f"{triplet[0].id} -> {triplet[1].id} -> {triplet[2].id}" results.append( NodeWithScore( node=TextNode( text=text, relationships=relationships, ), score=1.0 if scores is None else scores[i], ) ) return results def _add_source_text( self, retrieved_nodes: List[NodeWithScore], og_node_map: Dict[str, BaseNode] ) -> List[NodeWithScore]: """Combine retrieved nodes/triplets with their source text, using provided preamble.""" # map of ref doc id to triplets/retrieved labelled nodes graph_node_map: Dict[str, List[str]] = {} for node in retrieved_nodes: ref_doc_id = node.node.ref_doc_id or "" if ref_doc_id not in 
graph_node_map: graph_node_map[ref_doc_id] = [] graph_node_map[ref_doc_id].append(node.node.get_content()) result_nodes: List[NodeWithScore] = [] for node_with_score in retrieved_nodes: mapped_node = og_node_map.get(node_with_score.node.ref_doc_id or "", None) if mapped_node: graph_content = graph_node_map.get(mapped_node.node_id, []) if len(graph_content) > 0: graph_content_str = "\n".join(graph_content) cur_content = mapped_node.get_content() preamble_text = ( self._include_text_preamble if self._include_text_preamble else "" ) new_content = ( preamble_text + graph_content_str + "\n\n" + cur_content ) mapped_node = TextNode(**mapped_node.dict()) mapped_node.text = new_content result_nodes.append( NodeWithScore( node=mapped_node, score=node_with_score.score, ) ) else: result_nodes.append(node_with_score) return result_nodes def add_source_text(self, nodes: List[NodeWithScore]) -> List[NodeWithScore]: """Combine retrieved nodes/triplets with their source text.""" og_nodes = self._graph_store.get_llama_nodes( [x.node.ref_doc_id for x in nodes if x.node.ref_doc_id is not None] ) node_map = {node.node_id: node for node in og_nodes} return self._add_source_text(nodes, node_map) async def async_add_source_text( self, nodes: List[NodeWithScore] ) -> List[NodeWithScore]: """Combine retrieved nodes/triplets with their source text.""" og_nodes = await self._graph_store.aget_llama_nodes( [x.node.ref_doc_id for x in nodes if x.node.ref_doc_id is not None] ) og_node_map = {node.node_id: node for node in og_nodes} return self._add_source_text(nodes, og_node_map) def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]: nodes = self.retrieve_from_graph(query_bundle) if self.include_text and nodes: nodes = self.add_source_text(nodes) return nodes async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]: nodes = await self.aretrieve_from_graph(query_bundle) if self.include_text and nodes: nodes = await self.async_add_source_text(nodes) return nodes 
@abstractmethod def retrieve_from_graph(self, query_bundle: QueryBundle) -> List[NodeWithScore]: """Retrieve nodes from the labelled property graph.""" ... @abstractmethod async def aretrieve_from_graph( self, query_bundle: QueryBundle ) -> List[NodeWithScore]: """Retrieve nodes from the labelled property graph.""" ...
BasePGRetriever
python
tensorflow__tensorflow
tensorflow/python/eager/polymorphic_function/concrete_function.py
{ "start": 5007, "end": 14697 }
class ____(object): """Caches forward/backward functions with a delayed forward rewrite.""" def __init__( self, atomic_fn: atomic_function.AtomicFunction, func_graph_deleter ): """Construct an inference function and initialize caches.""" # A map from the number of forward function outputs with accepted gradients # to forward and backward functions, used to cache non-tape backward # function generation. self._cached_function_pairs = {} self._func_graph = atomic_fn.graph self._inference_function = atomic_fn self._attrs = atomic_fn.attributes self._gradient_name = None # Note that the FuncGraph is mutated later, so we need to inspect it now to # figure out the user-specified outputs of the inference function. self._num_inference_outputs = len(self._func_graph.outputs) self._func_graph_deleter = func_graph_deleter def forward_backward(self, num_doutputs=None): """A possibly-cached pair of forward and backward functions.""" if num_doutputs is None: num_doutputs = self._num_inference_outputs forward_backward = self._cached_function_pairs.get(num_doutputs) if forward_backward is not None: return forward_backward forward, backward = self._construct_forward_backward(num_doutputs) self._cached_function_pairs[num_doutputs] = (forward, backward) return forward, backward def _construct_forward_backward(self, num_doutputs): """Constructs a pair of forward and backward functions. Args: num_doutputs: The constructed backprop function will take output gradients for the first `num_doutputs` outputs of the forward function. Defaults to the number of outputs for the inference function, but when higher-order gradients are computed this will increase to include side outputs. Returns: A pair of (forward_function, backward_function): forward_function: A re-generated inference function (an AtomicFunction) to account for new side outputs, if any extra were required when building the backward pass. 
backward_function: A ConcreteFunction that Takes `num_doutputs` arguments and returns gradients with respect to inputs of the forward function. """ trainable_outputs = [ output for output in self._func_graph.outputs[:num_doutputs] if backprop_util.IsTrainable(output)] signature = [] for t in trainable_outputs: signature.append( tensor_lib.TensorSpec(*default_gradient.shape_and_dtype(t))) def _backprop_function(*grad_ys): with ops.device(None): return gradients_util._GradientsHelper( # pylint: disable=protected-access trainable_outputs, self._func_graph.inputs, grad_ys=grad_ys, src_graph=self._func_graph) with self._func_graph.as_default(): backwards_graph = func_graph_module.FuncGraph( _backward_name(self._func_graph.name)) func_graph_module.func_graph_from_py_func( name=backwards_graph.name, python_func=_backprop_function, args=[], kwargs={}, signature=signature, func_graph=backwards_graph) backwards_graph_captures = backwards_graph.external_captures captures_from_forward = [ c for c in backwards_graph_captures if not isinstance(c, ops.EagerTensor) and c.graph is self._func_graph] existing_outputs = object_identity.ObjectIdentitySet( self._func_graph.outputs) for capture in captures_from_forward: if capture not in existing_outputs: existing_outputs.add(capture) self._func_graph.outputs.append(capture) forward_function, backward_function = _create_forward_backward_with_graph( self._attrs, self._func_graph, backwards_graph) return forward_function, backward_function def _rewrite_forward_and_call_backward(self, op: ops.Operation, *doutputs): """Add outputs to the forward call and feed them to the grad function.""" forward_function, backwards_function = self.forward_backward(len(doutputs)) if not backwards_function.outputs: return backwards_function.structured_outputs op.graph._add_function_recursive(forward_function) # pylint: disable=protected-access # pylint: disable=protected-access # Rewrite an inference call op to be a forward call op op._set_func_attr("f", 
forward_function.name) op._set_type_list_attr( "Tout", [ o.dtype.as_datatype_enum for o in forward_function.function_type.flat_outputs ], ) truncated_outputs = forward_function.function_type.flat_outputs[ len(op.outputs) : ] op._add_outputs( [o.dtype.as_datatype_enum for o in truncated_outputs], [o.shape for o in truncated_outputs], ) for i in range(len(op.outputs)): output_type = forward_function.function_type.flat_outputs[i] handle_data = output_type.dtype._handle_data if handle_data: handle_data_util.set_handle_data( op.outputs[i], handle_data.shape_inference ) # pylint: enable=protected-access capture_mapping = dict( zip((ops.tensor_id(t) for t in self._func_graph.outputs), op.outputs)) remapped_captures = [ capture_mapping.get(ops.tensor_id(capture), capture) for capture in backwards_function.captured_inputs ] # Replace Nones with zeros since we're calling a graph function which # expects numeric inputs. cleaned_doutputs = [] for doutput, placeholder in zip(doutputs, self._func_graph.outputs): if backprop_util.IsTrainable(placeholder): if isinstance(doutput, indexed_slices.IndexedSlices): # Gradient passed to a backward ConcreteFunction must be tf.Tensor, # so we convert tf.IndexedSlices to tf.Tensor. cleaned_doutputs.append(ops.convert_to_tensor(doutput)) elif doutput is not None: cleaned_doutputs.append(doutput) else: cleaned_doutputs.append(default_gradient.zeros_like(placeholder)) # Compute the gradients using the side outputs return backwards_function._call_flat( # pylint: disable=protected-access cleaned_doutputs, remapped_captures) def get_gradient_function(self): """Returns gradient function. The gradient rewrites an inference call op to a forward call op, but does not modify a pre-existing forward call op. It then computes the gradient from the output's gradients and the side outputs of the forward op. 
""" return self._rewrite_forward_and_call_backward def forward(self, inference_args=None, input_tangents=None): """A forward function with only user-specified outputs. The call operation for the returned inference function can be rewritten into a forward function. This only happens if the backward function (from the `backward` method) ends up being used to compute gradients. This approach avoids constructing unnecessary graphs, but it only works if we are calling this function when not executing eagerly. Args: inference_args: A flat list of Tensors, arguments to the inference function. Unused, but taken for compatibility with _TapeGradientFunctions. input_tangents: A flat list of Tensors, jvps associated with `inference_args`. Unused; if required, tape functions must be used instead. Returns: An atomic_function.AtomicFunction. """ del inference_args # unused if input_tangents: # This class does not support special-cased forwardprop. The arguments are # here for compatibility with _TapeGradientFunctions. raise errors.InternalError("unexpectedly got forwardprop information in " "a class that does not support forwardprop.") return self._inference_function def _backward(self, outputs): """Fetch a backward function for `outputs` from the forward function.""" def _backward_function(*args): call_op = outputs[0].op return self._rewrite_forward_and_call_backward(call_op, *args) return _backward_function, outputs def record(self, flat_outputs, inference_args, input_tangents): """Record the function call operation. _DelayedRewriteGradientFunctions supports only first-order backprop tape gradients (and then only when graph building). It does not work with higher-order tape gradients or forward autodiff, but does work with higher-order symbolic gradients (tf.gradients). Args: flat_outputs: The result of running `forward`. inference_args: A flat list of Tensors with inference inputs to the operation. 
input_tangents: A flat list of Tensors with input tangents consumed by the operation. """ backward_function, to_record = self._backward(flat_outputs) record.record_operation( self._inference_function.cached_definition.signature.name, to_record, inference_args + input_tangents, backward_function, ) # Contains information about a forward function wrapped to compute jvps. _ForwardWrapper = collections.namedtuple( "_ForwardWrapper", ( # The wrapper Graph. "graph", # A flat list of non-tangent Tensor outputs from the wrapped forward # function. "outputs", # Indices for output tangents, same format as # forwardprop_util.pack_tangents. "output_indices", # A flat list of tangents for `outputs`. "output_tangents"))
_DelayedRewriteGradientFunctions
python
getsentry__sentry
src/sentry/models/dashboard_widget.py
{ "start": 5021, "end": 6677 }
class ____(Model): """ A query in a dashboard widget. """ __relocation_scope__ = RelocationScope.Organization widget = FlexibleForeignKey("sentry.DashboardWidget") name = models.CharField(max_length=255) fields = ArrayField(models.TextField(), default=list) conditions = models.TextField() # aggregates and columns will eventually replace fields. # Using django's built-in array field here since the one # from sentry/db/model/fields.py adds a default value to the # database migration. aggregates = ArrayField(models.TextField(), null=True) columns = ArrayField(models.TextField(), null=True) # Currently only used for tabular widgets. # If an alias is defined it will be shown in place of the field description in the table header field_aliases = ArrayField(models.TextField(), null=True) # Orderby condition for the query orderby = models.TextField(default="") # Order of the widget query in the widget. order = BoundedPositiveIntegerField() date_added = models.DateTimeField(default=timezone.now) date_modified = models.DateTimeField(default=timezone.now, db_default=Now()) # Whether this query is hidden from the UI, used by metric widgets is_hidden = models.BooleanField(default=False, db_default=False) # Used by Big Number to select aggregate displayed selected_aggregate = models.IntegerField(null=True) class Meta: app_label = "sentry" db_table = "sentry_dashboardwidgetquery" unique_together = (("widget", "order"),) __repr__ = sane_repr("widget", "type", "name") @region_silo_model
DashboardWidgetQuery
python
tensorflow__tensorflow
tensorflow/python/feature_column/feature_column.py
{ "start": 96857, "end": 104187 }
class ____( _DenseColumn, _SequenceDenseColumn, collections.namedtuple( '_SharedEmbeddingColumn', ('categorical_column', 'dimension', 'combiner', 'initializer', 'shared_embedding_collection_name', 'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable', 'use_safe_embedding_lookup'))): """See `embedding_column`.""" @property def name(self): if not hasattr(self, '_name'): self._name = '{}_shared_embedding'.format(self.categorical_column.name) return self._name @property def _var_scope_name(self): return self.shared_embedding_collection_name @property def _parse_example_spec(self): return self.categorical_column._parse_example_spec # pylint: disable=protected-access def _transform_feature(self, inputs): return inputs.get(self.categorical_column) @property def _variable_shape(self): if not hasattr(self, '_shape'): self._shape = tensor_shape.TensorShape([self.dimension]) return self._shape def _get_dense_tensor_internal(self, inputs, weight_collections=None, trainable=None): """Private method that follows the signature of _get_dense_tensor.""" # This method is called from a variable_scope with name _var_scope_name, # which is shared among all shared embeddings. Open a name_scope here, so # that the ops for different columns have distinct names. with ops.name_scope(None, default_name=self.name): # Get sparse IDs and weights. sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access inputs, weight_collections=weight_collections, trainable=trainable) sparse_ids = sparse_tensors.id_tensor sparse_weights = sparse_tensors.weight_tensor embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access shared_embedding_collection = ops.get_collection( self.shared_embedding_collection_name) if shared_embedding_collection: if len(shared_embedding_collection) > 1: raise ValueError( 'Collection {} can only contain one variable. ' 'Suggested fix A: Choose a unique name for this collection. 
' 'Suggested fix B: Do not add any variables to this collection. ' 'The feature_column library already adds a variable under the ' 'hood.'.format(shared_embedding_collection)) embedding_weights = shared_embedding_collection[0] if embedding_weights.get_shape() != embedding_shape: raise ValueError( 'Shared embedding collection {} contains variable {} of ' 'unexpected shape {}. Expected shape is {}. ' 'Suggested fix A: Choose a unique name for this collection. ' 'Suggested fix B: Do not add any variables to this collection. ' 'The feature_column library already adds a variable under the ' 'hood.'.format(self.shared_embedding_collection_name, embedding_weights.name, embedding_weights.get_shape(), embedding_shape)) else: embedding_weights = variable_scope.get_variable( name='embedding_weights', shape=embedding_shape, dtype=dtypes.float32, initializer=self.initializer, trainable=self.trainable and trainable, collections=weight_collections) ops.add_to_collection(self.shared_embedding_collection_name, embedding_weights) if self.ckpt_to_load_from is not None: to_restore = embedding_weights if isinstance(to_restore, variables.PartitionedVariable): to_restore = to_restore._get_variable_list() # pylint: disable=protected-access checkpoint_utils.init_from_checkpoint( self.ckpt_to_load_from, {self.tensor_name_in_ckpt: to_restore}) sparse_id_rank = tensor_shape.dimension_value( sparse_ids.dense_shape.get_shape()[0]) embedding_lookup_sparse = embedding_ops.safe_embedding_lookup_sparse if (not self.use_safe_embedding_lookup and sparse_id_rank is not None and sparse_id_rank <= 2): embedding_lookup_sparse = embedding_ops.embedding_lookup_sparse_v2 # Return embedding lookup result. 
return embedding_lookup_sparse( embedding_weights, sparse_ids, sparse_weights, combiner=self.combiner, name='%s_weights' % self.name, max_norm=self.max_norm) def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None): if isinstance(self.categorical_column, _SequenceCategoricalColumn): raise ValueError( 'In embedding_column: {}. ' 'categorical_column must not be of type _SequenceCategoricalColumn. ' 'Suggested fix A: If you wish to use input_layer, use a ' 'non-sequence categorical_column_with_*. ' 'Suggested fix B: If you wish to create sequence input, use ' 'sequence_input_layer instead of input_layer. ' 'Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column)) return self._get_dense_tensor_internal( inputs=inputs, weight_collections=weight_collections, trainable=trainable) def _get_sequence_dense_tensor(self, inputs, weight_collections=None, trainable=None): if not isinstance(self.categorical_column, _SequenceCategoricalColumn): raise ValueError( 'In embedding_column: {}. ' 'categorical_column must be of type _SequenceCategoricalColumn ' 'to use sequence_input_layer. ' 'Suggested fix: Use one of sequence_categorical_column_with_*. 
' 'Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column)) dense_tensor = self._get_dense_tensor_internal( # pylint: disable=protected-access inputs=inputs, weight_collections=weight_collections, trainable=trainable) sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access sequence_length = fc_utils.sequence_length_from_sparse_tensor( sparse_tensors.id_tensor) return _SequenceDenseColumn.TensorSequenceLengthPair( dense_tensor=dense_tensor, sequence_length=sequence_length) def _check_shape(shape, key): """Returns shape if it's valid, raises error otherwise.""" assert shape is not None if not nest.is_nested(shape): shape = [shape] shape = tuple(shape) for dimension in shape: if not isinstance(dimension, six.integer_types): raise TypeError('shape dimensions must be integer. ' 'shape: {}, key: {}'.format(shape, key)) if dimension < 1: raise ValueError('shape dimensions must be greater than 0. ' 'shape: {}, key: {}'.format(shape, key)) return shape
_SharedEmbeddingColumn
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_default_format09.py
{ "start": 315, "end": 2244 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("default_format09.xlsx") def test_create_file(self): """Test the creation of a file with user defined default format""" workbook = Workbook( self.got_filename, { "default_format_properties": {"font_name": "Arial", "font_size": 11}, "default_row_height": 19, "default_column_width": 72, }, ) worksheet = workbook.add_worksheet() worksheet.insert_image("E9", self.image_dir + "red.png", {"x_offset": 32}) # Set user column width and row height to test positioning calculation. worksheet.set_column_pixels(4, 4, 96) worksheet.set_row_pixels(8, 32) # Set column to text column width less than 1 character. worksheet.set_column_pixels(6, 6, 10) workbook.close() self.assertExcelEqual() def test_create_file_with_character_units(self): """Test the creation of a file with user defined default format""" # Same as workbook = Workbook( self.got_filename, { "default_format_properties": {"font_name": "Arial", "font_size": 11}, "default_row_height": 19, "default_column_width": 72, }, ) worksheet = workbook.add_worksheet() worksheet.insert_image("E9", self.image_dir + "red.png", {"x_offset": 32}) # Set user column width and row height to test positioning calculation. worksheet.set_column(4, 4, 11.38) worksheet.set_row(8, 24.0) # Set column to text column width less than 1 character. worksheet.set_column(6, 6, 0.77) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
wandb__wandb
landfill/functional_tests/artifacts/use-model.py
{ "start": 175, "end": 1416 }
class ____(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1) self.conv2 = nn.Conv2d(32, 64, 3, 1) self.dropout1 = nn.Dropout(0.25) self.dropout2 = nn.Dropout(0.5) self.fc1 = nn.Linear(9216, 128) self.fc2 = nn.Linear(128, 10) def forward(self, x): x = self.conv1(x) x = F.relu(x) x = self.conv2(x) x = F.relu(x) x = F.max_pool2d(x, 2) x = self.dropout1(x) x = torch.flatten(x, 1) x = self.fc1(x) x = F.relu(x) x = self.dropout2(x) x = self.fc2(x) output = F.log_softmax(x, dim=1) return output def main(): run = wandb.init() my_model = Net() sm = _SavedModel.init(my_model) art = wandb.Artifact("my-model", "model") art.add(sm, "index") art = run.log_artifact(art) art.wait() # use_model() hits the download path where we try to download the file # using entry._file_url, which fails in this test harness # TODO: Remove the download() call once caching is implemented in nexus art.download() _ = use_model("my-model:latest") run.finish() if __name__ == "__main__": main()
Net
python
huggingface__transformers
src/transformers/models/mt5/modeling_mt5.py
{ "start": 1693, "end": 2878 }
class ____(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the MT5 style. No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # MT5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://huggingface.co/papers/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->MT5
MT5LayerNorm
python
numba__numba
numba/tests/test_pycc.py
{ "start": 11279, "end": 14894 }
class ____(TestCase): def setUp(self): super().setUp() self.skip_if_no_external_compiler() # external compiler needed unset_macosx_deployment_target() # Copy the test project into a temp directory to avoid # keeping any build leftovers in the source tree self.tmpdir = temp_directory('test_pycc_distutils') source_dir = os.path.join(base_path, 'pycc_distutils_usecase') self.usecase_dir = os.path.join(self.tmpdir, 'work') shutil.copytree(source_dir, self.usecase_dir) def check_setup_py(self, setup_py_file): # Compute PYTHONPATH to ensure the child processes see this Numba import numba numba_path = os.path.abspath(os.path.dirname( os.path.dirname(numba.__file__))) env = dict(os.environ) if env.get('PYTHONPATH', ''): env['PYTHONPATH'] = numba_path + os.pathsep + env['PYTHONPATH'] else: env['PYTHONPATH'] = numba_path def run_python(args): p = subprocess.Popen([sys.executable] + args, cwd=self.usecase_dir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env) out, _ = p.communicate() rc = p.wait() if rc != 0: self.fail("python failed with the following output:\n%s" % out.decode('utf-8', 'ignore')) run_python([setup_py_file, "build_ext", "--inplace"]) code = """if 1: import pycc_compiled_module as lib assert lib.get_const() == 42 res = lib.ones(3) assert list(res) == [1.0, 1.0, 1.0] """ run_python(["-c", code]) def check_setup_nested_py(self, setup_py_file): # Compute PYTHONPATH to ensure the child processes see this Numba import numba numba_path = os.path.abspath(os.path.dirname( os.path.dirname(numba.__file__))) env = dict(os.environ) if env.get('PYTHONPATH', ''): env['PYTHONPATH'] = numba_path + os.pathsep + env['PYTHONPATH'] else: env['PYTHONPATH'] = numba_path def run_python(args): p = subprocess.Popen([sys.executable] + args, cwd=self.usecase_dir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env) out, _ = p.communicate() rc = p.wait() if rc != 0: self.fail("python failed with the following output:\n%s" % out.decode('utf-8', 'ignore')) 
run_python([setup_py_file, "build_ext", "--inplace"]) code = """if 1: import nested.pycc_compiled_module as lib assert lib.get_const() == 42 res = lib.ones(3) assert list(res) == [1.0, 1.0, 1.0] """ run_python(["-c", code]) def test_setup_py_distutils(self): self.check_setup_py("setup_distutils.py") def test_setup_py_distutils_nested(self): self.check_setup_nested_py("setup_distutils_nested.py") def test_setup_py_setuptools(self): self.check_setup_py("setup_setuptools.py") def test_setup_py_setuptools_nested(self): self.check_setup_nested_py("setup_setuptools_nested.py") if __name__ == "__main__": unittest.main()
TestDistutilsSupport
python
ray-project__ray
python/ray/train/v2/_internal/execution/train_fn_utils.py
{ "start": 5488, "end": 7342 }
class ____(TrainFnUtils): def report( self, metrics: Dict[str, Any], checkpoint: Optional["Checkpoint"] = None, checkpoint_dir_name: Optional[str] = None, checkpoint_upload_mode: CheckpointUploadMode = CheckpointUploadMode.SYNC, delete_local_checkpoint_after_upload: Optional[bool] = None, checkpoint_upload_fn: Optional[ Callable[["Checkpoint", str], "Checkpoint"] ] = None, validate_fn: Optional[Callable[["Checkpoint", Optional[Dict]], Dict]] = None, validate_config: Optional[Dict] = None, ) -> None: return get_internal_train_context().report( metrics, checkpoint, checkpoint_dir_name, checkpoint_upload_mode, delete_local_checkpoint_after_upload, checkpoint_upload_fn, validate_fn, validate_config, ) def get_checkpoint(self): return get_internal_train_context().get_checkpoint() def get_dataset_shard(self, dataset_info: DatasetShardMetadata) -> DataIterator: return get_internal_train_context().get_dataset_shard(dataset_info) def get_context(self) -> DistributedTrainContext: return DistributedTrainContext() def is_distributed(self) -> bool: return True def barrier(self) -> None: return collective_impl.barrier() def broadcast_from_rank_zero(self, data: Any) -> Any: return collective_impl.broadcast_from_rank_zero(data) def get_all_reported_checkpoints( self, consistency_mode: CheckpointConsistencyMode = CheckpointConsistencyMode.VALIDATED, ) -> List["ReportedCheckpoint"]: return get_internal_train_context().get_all_reported_checkpoints( consistency_mode=consistency_mode )
DistributedTrainFnUtils
python
pytorch__pytorch
torch/utils/data/datapipes/_typing.py
{ "start": 851, "end": 895 }
class ____(numbers.Integral): pass
Integer
python
huggingface__transformers
src/transformers/models/unispeech_sat/configuration_unispeech_sat.py
{ "start": 847, "end": 18855 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`UniSpeechSatModel`]. It is used to instantiate an UniSpeechSat model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the UniSpeechSat [microsoft/unispeech-sat-base-100h-libri-ft](https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32): Vocabulary size of the UniSpeechSat model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`UniSpeechSatModel`]. Vocabulary size of the model. Defines the different tokens that can be represented by the *inputs_ids* passed to the forward method of [`UniSpeechSatModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. 
activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. feat_proj_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for output of the feature encoder. feat_quantizer_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for the output of the feature encoder that's used by the quantizer. final_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the final projection layer of [`UniSpeechSatForCTC`]. layerdrop (`float`, *optional*, defaults to 0.1): The LayerDrop probability. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. feat_extract_norm (`str`, *optional*, defaults to `"group"`): The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D convolutional layers. feat_extract_activation (`str, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. conv_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`): A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers. 
conv_stride (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`): A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The length of *conv_kernel* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_bias (`bool`, *optional*, defaults to `False`): Whether the 1D convolutional layers have a bias. num_conv_pos_embeddings (`int`, *optional*, defaults to 128): Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer. num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16): Number of groups of 1D convolutional positional embeddings layer. do_stable_layer_norm (`bool`, *optional*, defaults to `False`): Whether to apply *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is False` corresponds to applying layer norm after the attention layer. apply_spec_augment (`bool`, *optional*, defaults to `True`): Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see [SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition](https://huggingface.co/papers/1904.08779). mask_time_prob (`float`, *optional*, defaults to 0.05): Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. 
If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_time_length (`int`, *optional*, defaults to 10): Length of vector span along the time axis. mask_time_min_masks (`int`, *optional*, defaults to 2): The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks'' mask_feature_prob (`float`, *optional*, defaults to 0.0): Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_time_length'' independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_feature_length (`int`, *optional*, defaults to 10): Length of vector span along the feature axis. mask_feature_min_masks (`int`, *optional*, defaults to 0): The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks'' num_codevectors_per_group (`int`, *optional*, defaults to 320): Number of entries in each quantization codebook (group). num_codevector_groups (`int`, *optional*, defaults to 2): Number of codevector groups for product codevector quantization. 
contrastive_logits_temperature (`float`, *optional*, defaults to 0.1): The temperature *kappa* in the contrastive loss. num_negatives (`int`, *optional*, defaults to 100): Number of negative samples for the contrastive loss. codevector_dim (`int`, *optional*, defaults to 256): Dimensionality of the quantized feature vectors. proj_codevector_dim (`int`, *optional*, defaults to 256): Dimensionality of the final projection of both the quantized and the transformer features. diversity_loss_weight (`int`, *optional*, defaults to 0.1): The weight of the codebook diversity loss component. ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`): Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an instance of [`UniSpeechSatForCTC`]. ctc_zero_infinity (`bool`, *optional*, defaults to `False`): Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of [`UniSpeechSatForCTC`]. use_weighted_layer_sum (`bool`, *optional*, defaults to `False`): Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of [`UniSpeechSatForSequenceClassification`]. classifier_proj_size (`int`, *optional*, defaults to 256): Dimensionality of the projection before token mean-pooling for classification. tdnn_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`): A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN* module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers. tdnn_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the *XVector* model. 
The length of *tdnn_kernel* has to match the length of *tdnn_dim*. tdnn_dilation (`tuple[int]` or `list[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`): A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*. xvector_output_dim (`int`, *optional*, defaults to 512): Dimensionality of the *XVector* embedding vectors. pad_token_id (`int`, *optional*, defaults to 0): The id of the padding token. bos_token_id (`int`, *optional*, defaults to 1): The id of the "beginning-of-sequence" token. eos_token_id (`int`, *optional*, defaults to 2): The id of the "end-of-sequence" token. num_clusters (`int`, *optional*, defaults to 504): Number of clusters for weak labeling. Only relevant when using an instance of [`UniSpeechSatForPreTraining`]. Example: ```python >>> from transformers import UniSpeechSatModel, UniSpeechSatConfig >>> # Initializing a UniSpeechSat microsoft/unispeech-sat-base-100h-libri-ft style configuration >>> configuration = UniSpeechSatConfig() >>> # Initializing a model from the microsoft/unispeech-sat-base-100h-libri-ft style configuration >>> model = UniSpeechSatModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "unispeech-sat" def __init__( self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, 
mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, num_clusters=504, **kwargs, ): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_activation = feat_extract_activation self.conv_dim = list(conv_dim) self.conv_stride = list(conv_stride) self.conv_kernel = list(conv_kernel) self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_feat_extract_layers = len(self.conv_dim) self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.num_attention_heads = num_attention_heads self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.feat_proj_dropout = feat_proj_dropout self.final_dropout = final_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.vocab_size = vocab_size self.num_clusters = num_clusters self.do_stable_layer_norm = do_stable_layer_norm self.use_weighted_layer_sum = use_weighted_layer_sum if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise 
ValueError( "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`," f" `len(config.conv_kernel) = {len(self.conv_kernel)}`." ) # fine-tuning config parameters for SpecAugment: https://huggingface.co/papers/1904.08779 self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks # parameters for pretraining with codevector quantized representations self.num_codevectors_per_group = num_codevectors_per_group self.num_codevector_groups = num_codevector_groups self.contrastive_logits_temperature = contrastive_logits_temperature self.feat_quantizer_dropout = feat_quantizer_dropout self.num_negatives = num_negatives self.codevector_dim = codevector_dim self.proj_codevector_dim = proj_codevector_dim self.diversity_loss_weight = diversity_loss_weight # ctc loss self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. self.classifier_proj_size = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. self.tdnn_dim = list(tdnn_dim) self.tdnn_kernel = list(tdnn_kernel) self.tdnn_dilation = list(tdnn_dilation) self.xvector_output_dim = xvector_output_dim @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1) __all__ = ["UniSpeechSatConfig"]
UniSpeechSatConfig
python
great-expectations__great_expectations
great_expectations/checkpoint/actions.py
{ "start": 34952, "end": 38859 }
class ____(DataDocsAction): """Notify the site builders of all data docs sites of a Data Context that a validation result should be added to the data docs. YAML configuration example: ```yaml - name: update_data_docs action: class_name: UpdateDataDocsAction ``` You can also instruct ``UpdateDataDocsAction`` to build only certain sites by providing a ``site_names`` key with a list of sites to update: ```yaml - name: update_data_docs action: class_name: UpdateDataDocsAction site_names: - local_site ``` Args: site_names: Optional. A list of the names of sites to update. """ # noqa: E501 # FIXME CoP type: Literal["update_data_docs"] = "update_data_docs" site_names: List[str] = [] @override def run( self, checkpoint_result: CheckpointResult, action_context: ActionContext | None = None ) -> dict: action_results: dict[ValidationResultIdentifier, dict[str, str]] = {} for result_identifier, result in checkpoint_result.run_results.items(): suite_name = result.suite_name expectation_suite_identifier: ExpectationSuiteIdentifier | GXCloudIdentifier if self._using_cloud_context: expectation_suite_identifier = GXCloudIdentifier( resource_type=GXCloudRESTResource.EXPECTATION_SUITE, resource_name=suite_name ) else: expectation_suite_identifier = ExpectationSuiteIdentifier(name=suite_name) action_result = self._run( validation_result_suite=result, validation_result_suite_identifier=result_identifier, expectation_suite_identifier=expectation_suite_identifier, ) action_results[result_identifier] = action_result return action_results def _run( self, validation_result_suite: ExpectationSuiteValidationResult, validation_result_suite_identifier: Union[ValidationResultIdentifier, GXCloudIdentifier], action_context=None, expectation_suite_identifier=None, checkpoint_identifier=None, ): logger.debug("UpdateDataDocsAction.run") if validation_result_suite is None: logger.warning( f"No validation_result_suite was passed to {type(self).__name__} action. Skipping action." 
# noqa: E501 # FIXME CoP ) return if not isinstance( validation_result_suite_identifier, (ValidationResultIdentifier, GXCloudIdentifier), ): raise TypeError( # noqa: TRY003 # FIXME CoP "validation_result_id must be of type ValidationResultIdentifier or" f" GeCloudIdentifier, not {type(validation_result_suite_identifier)}" ) # TODO Update for RenderedDataDocs # build_data_docs will return the index page for the validation results, but we want to return the url for the validation result using the code below # noqa: E501 # FIXME CoP self._build_data_docs( site_names=self.site_names, resource_identifiers=[ validation_result_suite_identifier, expectation_suite_identifier, ], ) data_docs_validation_results: dict = {} if self._using_cloud_context: return data_docs_validation_results # get the URL for the validation result docs_site_urls_list = self._get_docs_sites_urls( resource_identifier=validation_result_suite_identifier, site_names=self.site_names, ) # process payload for sites in docs_site_urls_list: data_docs_validation_results[sites["site_name"]] = sites["site_url"] return data_docs_validation_results
UpdateDataDocsAction
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/array_ops/bitcast_op_test.py
{ "start": 1065, "end": 3373 }
class ____(test.TestCase): def _testBitcast(self, x, datatype, shape): with test_util.use_gpu(): tf_ans = array_ops.bitcast(x, datatype) out = self.evaluate(tf_ans) if sys.byteorder == "little": buff_after = memoryview(out).tobytes() buff_before = memoryview(x).tobytes() else: buff_after = memoryview(out.byteswap()).tobytes() buff_before = memoryview(x.byteswap()).tobytes() self.assertEqual(buff_before, buff_after) self.assertEqual(tf_ans.get_shape(), shape) self.assertEqual(tf_ans.dtype, datatype) def testSmaller(self): x = np.random.rand(3, 2) datatype = dtypes.int8 shape = [3, 2, 8] self._testBitcast(x, datatype, shape) def testLarger(self): x = np.arange(16, dtype=np.int8).reshape([4, 4]) datatype = dtypes.int32 shape = [4] self._testBitcast(x, datatype, shape) def testSameDtype(self): x = np.random.rand(3, 4) shape = [3, 4] self._testBitcast(x, x.dtype, shape) def testSameSize(self): x = np.random.rand(3, 4) shape = [3, 4] self._testBitcast(x, dtypes.int64, shape) def testErrors(self): x = np.zeros([1, 1], np.int8) datatype = dtypes.int32 # When eager_op_as_function is enabled shape inference will raise # a different more informative error message. 
with self.assertRaisesRegex( (ValueError, errors.InvalidArgumentError), "Cannot bitcast from 6 to 3|convert from s8.* to S32"): array_ops.bitcast(x, datatype, None) def testEmpty(self): x = np.ones([], np.int32) datatype = dtypes.int8 shape = [4] self._testBitcast(x, datatype, shape) def testUnknownShape(self): # Need to use placeholder for unknown shape with ops.Graph().as_default(): x = array_ops.placeholder(dtypes.float32) datatype = dtypes.int8 array_ops.bitcast(x, datatype, None) @test_util.disable_tfrt("b/169901260") def testQuantizedType(self): shape = [3, 4] x = np.zeros(shape, np.uint16) datatype = dtypes.quint16 self._testBitcast(x, datatype, shape) def testUnsignedType(self): shape = [3, 4] x = np.zeros(shape, np.int64) datatype = dtypes.uint64 self._testBitcast(x, datatype, shape) if __name__ == "__main__": test.main()
BitcastTest
python
ray-project__ray
python/ray/data/_internal/logical/rules/inherit_batch_format.py
{ "start": 305, "end": 1684 }
class ____(Rule): """For AbstractAllToAll based operator, apply this rule to inherit batch_format from upstream operator by traversing the entire DAG.""" def apply(self, plan: LogicalPlan) -> LogicalPlan: optimized_dag: LogicalOperator = self._apply(plan.dag) new_plan = LogicalPlan(dag=optimized_dag, context=plan.context) return new_plan def _apply(self, op: LogicalOperator): # Post-order traversal. nodes: Iterable[LogicalOperator] = deque() for node in op.post_order_iter(): nodes.appendleft(node) while len(nodes) > 0: current_op = nodes.pop() if isinstance(current_op, AbstractAllToAll): # traversal up the DAG until we find MapBatches with batch_format # or we reach to source op and do nothing upstream_op = current_op.input_dependencies[0] while upstream_op.input_dependencies: if ( isinstance(upstream_op, MapBatches) and upstream_op._batch_format ): current_op._batch_format = upstream_op._batch_format break upstream_op = upstream_op.input_dependencies[0] # just return the default op return op
InheritBatchFormatRule
python
pytorch__pytorch
torch/nn/modules/padding.py
{ "start": 5456, "end": 7541 }
class ____(_CircularPadNd): r"""Pads the input tensor using circular padding of the input boundary. Tensor values at the beginning of the dimension are used to pad the end, and values at the end are used to pad the beginning. If negative padding is applied then the ends of the tensor get removed. For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`. Args: padding (int, tuple): the size of the padding. If is `int`, uses the same padding in all boundaries. If a 6-`tuple`, uses (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`, :math:`\text{padding\_front}`, :math:`\text{padding\_back}`) Note that padding size should be less than or equal to the corresponding input dimension. Shape: - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}` :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}` Examples:: >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> m = nn.CircularPad3d(3) >>> input = torch.randn(16, 3, 8, 320, 480) >>> output = m(input) >>> # using different paddings for different sides >>> m = nn.CircularPad3d((3, 3, 6, 6, 1, 1)) >>> output = m(input) """ # pyrefly: ignore [bad-override] padding: tuple[int, int, int, int, int, int] def __init__(self, padding: _size_6_t) -> None: super().__init__() self.padding = _ntuple(6)(padding) def _check_input_dim(self, input) -> None: if input.dim() != 4 and input.dim() != 5: raise ValueError(f"expected 4D or 5D input (got {input.dim()}D input)")
CircularPad3d
python
great-expectations__great_expectations
great_expectations/core/metric_function_types.py
{ "start": 4940, "end": 5211 }
class ____(enum.Enum): """Enum type, whose members specify available suffixes for metrics representing partial functions.""" # noqa: E501 # FIXME CoP MAP = "map" CONDITION = "condition" AGGREGATE_FUNCTION = "aggregate_fn"
MetricPartialFunctionTypeSuffixes
python
jmcnamara__XlsxWriter
xlsxwriter/test/workbook/test_write_workbook.py
{ "start": 299, "end": 973 }
class ____(unittest.TestCase): """ Test the Workbook _write_workbook() method. """ def setUp(self): self.fh = StringIO() self.workbook = Workbook() self.workbook._set_filehandle(self.fh) def test_write_workbook(self): """Test the _write_workbook() method""" self.workbook._write_workbook() exp = """<workbook xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">""" got = self.fh.getvalue() self.assertEqual(exp, got) def tearDown(self): self.workbook.fileclosed = 1
TestWriteWorkbook
python
encode__django-rest-framework
tests/test_generics.py
{ "start": 15143, "end": 15274 }
class ____: def filter_queryset(self, request, queryset, view): return queryset.filter(text='foo')
InclusiveFilterBackend
python
pennersr__django-allauth
tests/apps/socialaccount/providers/auth0/tests.py
{ "start": 238, "end": 822 }
class ____(OAuth2TestsMixin, TestCase): provider_id = Auth0Provider.id def get_mocked_response(self): return MockedResponse( HTTPStatus.OK, """ { "picture": "https://secure.gravatar.com/avatar/123", "email": "mr.bob@your.Auth0.server.example.com", "id": 2, "sub": 2, "identities": [], "name": "Mr Bob" } """, ) def get_expected_to_str(self): return "mr.bob@your.Auth0.server.example.com"
Auth0Tests
python
sqlalchemy__sqlalchemy
test/orm/test_rel_fn.py
{ "start": 35763, "end": 37230 }
class ____(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL): def test_determine_direction_compound_2(self): joincond = self._join_fixture_compound_expression_2(support_sync=False) is_(joincond.direction, ONETOMANY) def test_determine_direction_o2m(self): joincond = self._join_fixture_o2m() is_(joincond.direction, ONETOMANY) def test_determine_direction_o2m_selfref(self): joincond = self._join_fixture_o2m_selfref() is_(joincond.direction, ONETOMANY) def test_determine_direction_m2o_selfref(self): joincond = self._join_fixture_m2o_selfref() is_(joincond.direction, MANYTOONE) def test_determine_direction_o2m_composite_selfref(self): joincond = self._join_fixture_o2m_composite_selfref() is_(joincond.direction, ONETOMANY) def test_determine_direction_m2o_composite_selfref(self): joincond = self._join_fixture_m2o_composite_selfref() is_(joincond.direction, MANYTOONE) def test_determine_direction_m2o(self): joincond = self._join_fixture_m2o() is_(joincond.direction, MANYTOONE) def test_determine_direction_purely_single_o2m(self): joincond = self._join_fixture_purely_single_o2m() is_(joincond.direction, ONETOMANY) def test_determine_direction_purely_single_m2o(self): joincond = self._join_fixture_purely_single_m2o() is_(joincond.direction, MANYTOONE)
DirectionTest
python
google__jax
jax/_src/dtypes.py
{ "start": 2302, "end": 10831 }
class ____(StrictABC): """Abstract Base Class for extended dtypes""" @property @abc.abstractmethod def type(self) -> type: ... # fp8 support float8_e3m4: type[np.generic] = ml_dtypes.float8_e3m4 float8_e4m3: type[np.generic] = ml_dtypes.float8_e4m3 float8_e8m0fnu: type[np.generic] = ml_dtypes.float8_e8m0fnu float8_e4m3b11fnuz: type[np.generic] = ml_dtypes.float8_e4m3b11fnuz float8_e4m3fn: type[np.generic] = ml_dtypes.float8_e4m3fn float8_e4m3fnuz: type[np.generic] = ml_dtypes.float8_e4m3fnuz float8_e5m2: type[np.generic] = ml_dtypes.float8_e5m2 float8_e5m2fnuz: type[np.generic] = ml_dtypes.float8_e5m2fnuz _float8_e3m4_dtype: np.dtype = np.dtype(float8_e3m4) _float8_e4m3_dtype: np.dtype = np.dtype(float8_e4m3) _float8_e8m0fnu_dtype: np.dtype = np.dtype(float8_e8m0fnu) _float8_e4m3b11fnuz_dtype: np.dtype = np.dtype(float8_e4m3b11fnuz) _float8_e4m3fn_dtype: np.dtype = np.dtype(float8_e4m3fn) _float8_e4m3fnuz_dtype: np.dtype = np.dtype(float8_e4m3fnuz) _float8_e5m2_dtype: np.dtype = np.dtype(float8_e5m2) _float8_e5m2fnuz_dtype: np.dtype = np.dtype(float8_e5m2fnuz) # fp4 support float4_e2m1fn: type[np.generic] = ml_dtypes.float4_e2m1fn _float4_e2m1fn_dtype: np.dtype = np.dtype(float4_e2m1fn) def supports_inf(dtype: DTypeLike) -> bool: """Return true if the dtype supports infinity, else return False.""" typ = np.dtype(dtype).type if typ in {float8_e4m3b11fnuz, float8_e4m3fn, float8_e4m3fnuz, float8_e5m2fnuz}: return False return issubdtype(dtype, np.inexact) # bfloat16 support bfloat16: type[np.generic] = ml_dtypes.bfloat16 _bfloat16_dtype: np.dtype = np.dtype(bfloat16) _custom_float_scalar_types = [ float4_e2m1fn, float8_e3m4, float8_e4m3, float8_e8m0fnu, float8_e4m3b11fnuz, float8_e4m3fn, float8_e4m3fnuz, float8_e5m2, float8_e5m2fnuz, bfloat16, ] _custom_float_dtypes = [ _float4_e2m1fn_dtype, _float8_e3m4_dtype, _float8_e4m3_dtype, _float8_e8m0fnu_dtype, _float8_e4m3b11fnuz_dtype, _float8_e4m3fn_dtype, _float8_e4m3fnuz_dtype, _float8_e5m2_dtype, _float8_e5m2fnuz_dtype, 
_bfloat16_dtype, ] _float8_dtypes = [ _float8_e3m4_dtype, _float8_e4m3_dtype, _float8_e8m0fnu_dtype, _float8_e4m3b11fnuz_dtype, _float8_e4m3fn_dtype, _float8_e4m3fnuz_dtype, _float8_e5m2_dtype, _float8_e5m2fnuz_dtype, ] _float4_dtypes: list[np.dtype] = [ _float4_e2m1fn_dtype, ] int2: type[np.generic] = ml_dtypes.int2 uint2: type[np.generic] = ml_dtypes.uint2 _int2_dtype: np.dtype = np.dtype(int2) _uint2_dtype: np.dtype = np.dtype(uint2) # 4-bit integer support int4: type[np.generic] = ml_dtypes.int4 uint4: type[np.generic] = ml_dtypes.uint4 _int4_dtype = np.dtype(int4) _uint4_dtype = np.dtype(uint4) _intn_dtypes = [ _int2_dtype, _uint2_dtype, _int4_dtype, _uint4_dtype, ] # Default types. bool_ = np.bool_ int_: type[Any] uint: type[Any] float_: type[Any] complex_: type[Any] if config.default_dtype_bits.value == '32': int_ = np.int32 uint = np.uint32 float_ = np.float32 complex_ = np.complex64 else: int_ = np.int64 uint = np.uint64 float_ = np.float64 complex_ = np.complex128 # Default dtypes. These are intended to have the same semantics as, say, # canonicalize_dtype(np.float64), but are preparing for the reduction in the # number of places we perform dtype canonicalization. 
def default_int_dtype() -> DType: return ( np.dtype(np.int64) if config.enable_x64.value and config.default_dtype_bits.value == '64' else np.dtype(np.int32) ) def default_uint_dtype() -> DType: return ( np.dtype(np.uint64) if config.enable_x64.value and config.default_dtype_bits.value == '64' else np.dtype(np.uint32) ) def default_float_dtype() -> DType: return ( np.dtype(np.float64) if config.enable_x64.value and config.default_dtype_bits.value == '64' else np.dtype(np.float32) ) def default_complex_dtype() -> DType: return ( np.dtype(np.complex128) if config.enable_x64.value and config.default_dtype_bits.value == '64' else np.dtype(np.complex64) ) default_types: dict[str, Callable[[], DType]] = { 'b': lambda: np.dtype(bool), 'i': default_int_dtype, 'u': default_uint_dtype, 'f': default_float_dtype, 'c': default_complex_dtype, } def jax_dtype(obj: DTypeLike | None, *, align: bool = False, copy: bool = False) -> DType: """Cast an object to a dtype, respecting JAX dtype defaults. Arguments mirror those of :func:`numpy.dtype`. """ if obj is None: obj = default_float_dtype() elif issubdtype(obj, extended): return obj # type: ignore[return-value] elif isinstance(obj, type) and (f := _DEFAULT_TYPEMAP.get(obj)) is not None: obj = f() return np.dtype(obj, align=align, copy=copy) _DEFAULT_TYPEMAP: dict[type, Callable[[], np.dtype]] = { bool: lambda: np.dtype(bool), int: default_int_dtype, float: default_float_dtype, complex: default_complex_dtype, } def itemsize_bits(dtype: DTypeLike) -> int: """Number of bits per element for the dtype.""" # Note: we cannot use dtype.itemsize here because this is # incorrect for sub-byte integer types. 
if dtype is None: raise ValueError("dtype cannot be None.") if dtype == np.dtype(bool): return 8 # physical bit layout for boolean dtype elif issubdtype(dtype, np.integer): return iinfo(dtype).bits elif issubdtype(dtype, np.floating): return finfo(dtype).bits elif issubdtype(dtype, np.complexfloating): return 2 * finfo(dtype).bits else: raise ValueError(f"unexpected input: {dtype=}") # Trivial vectorspace datatype needed for tangent values of int/bool primals float0: np.dtype = np.dtype([('float0', np.void, 0)]) _dtype_to_32bit_dtype: dict[DType, DType] = { np.dtype('int64'): np.dtype('int32'), np.dtype('uint64'): np.dtype('uint32'), np.dtype('float64'): np.dtype('float32'), np.dtype('complex128'): np.dtype('complex64'), } # Note: we promote narrow types to float32 here for backward compatibility # with earlier approaches. We might consider revisiting this, or perhaps # tying the logic more closely to the type promotion lattice. _dtype_to_inexact: dict[DType, DType] = { np.dtype(k): np.dtype(v) for k, v in [ ('bool', 'float32'), ('uint4', 'float32'), ('int4', 'float32'), ('uint8', 'float32'), ('int8', 'float32'), ('uint16', 'float32'), ('int16', 'float32'), ('uint32', 'float32'), ('int32', 'float32'), ('uint64', 'float64'), ('int64', 'float64') ] } def to_numeric_dtype(dtype: DTypeLike) -> DType: """Promotes a dtype into an numeric dtype, if it is not already one.""" dtype_ = np.dtype(dtype) return np.dtype('int32') if dtype_ == np.dtype('bool') else dtype_ def to_inexact_dtype(dtype: DTypeLike) -> DType: """Promotes a dtype into an inexact dtype, if it is not already one.""" dtype_ = np.dtype(dtype) return _dtype_to_inexact.get(dtype_, dtype_) def to_floating_dtype(dtype: DTypeLike) -> DType: """Promotes a dtype to a non-complex floating dtype.""" dtype_ = np.dtype(dtype) return finfo(_dtype_to_inexact.get(dtype_, dtype_)).dtype def to_complex_dtype(dtype: DTypeLike) -> DType: ftype = to_inexact_dtype(dtype) if ftype in [np.dtype('float64'), 
np.dtype('complex128')]: return np.dtype('complex128') return np.dtype('complex64') @functools.cache def _canonicalize_dtype(x64_enabled: bool, allow_extended_dtype: bool, dtype: Any) -> DType | ExtendedDType: if issubdtype(dtype, extended): if not allow_extended_dtype: raise ValueError(f"Internal: canonicalize_dtype called on extended dtype {dtype} " "with allow_extended_dtype=False") return dtype try: dtype_ = np.dtype(dtype) except TypeError as e: raise TypeError(f'dtype {dtype!r} not understood') from e if x64_enabled: return dtype_ else: return _dtype_to_32bit_dtype.get(dtype_, dtype_) @overload def canonicalize_dtype(dtype: Any, allow_extended_dtype: Literal[False] = False) -> DType: ... @overload def canonicalize_dtype(dtype: Any, allow_extended_dtype: bool = False) -> DType | ExtendedDType: ... @export def canonicalize_dtype(dtype: Any, allow_extended_dtype: bool = False) -> DType | ExtendedDType: """Convert from a dtype to a canonical dtype based on config.x64_enabled.""" return _canonicalize_dtype(config.enable_x64.value, allow_extended_dtype, dtype) # pytype: disable=bad-return-type
ExtendedDType
python
pyqtgraph__pyqtgraph
pyqtgraph/examples/parametertree.py
{ "start": 1031, "end": 1946 }
class ____(pTypes.GroupParameter): def __init__(self, **opts): opts['type'] = 'bool' opts['value'] = True pTypes.GroupParameter.__init__(self, **opts) self.addChild({'name': 'A = 1/B', 'type': 'float', 'value': 7, 'suffix': 'Hz', 'siPrefix': True}) self.addChild({'name': 'B = 1/A', 'type': 'float', 'value': 1/7., 'suffix': 's', 'siPrefix': True}) self.a = self.param('A = 1/B') self.b = self.param('B = 1/A') self.a.sigValueChanged.connect(self.aChanged) self.b.sigValueChanged.connect(self.bChanged) def aChanged(self): self.b.setValue(1.0 / self.a.value(), blockSignal=self.bChanged) def bChanged(self): self.a.setValue(1.0 / self.b.value(), blockSignal=self.aChanged) ## test add/remove ## this group includes a menu allowing the user to add new parameters into its child list
ComplexParameter
python
pytorch__pytorch
torch/_dynamo/callback.py
{ "start": 1604, "end": 1695 }
class ____: callback_trigger: CallbackTrigger compile_id: str @dataclass
CallbackArgs
python
pytorch__pytorch
test/test_dataloader.py
{ "start": 18452, "end": 21589 }
class ____(TestCase): def test_concat_two_singletons(self): result = ConcatDataset([[0], [1]]) self.assertEqual(2, len(result)) self.assertEqual(0, result[0]) self.assertEqual(1, result[1]) def test_concat_two_non_singletons(self): result = ConcatDataset([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) self.assertEqual(10, len(result)) self.assertEqual(0, result[0]) self.assertEqual(5, result[5]) def test_concat_two_non_singletons_with_empty(self): # Adding an empty dataset somewhere is correctly handled result = ConcatDataset([[0, 1, 2, 3, 4], [], [5, 6, 7, 8, 9]]) self.assertEqual(10, len(result)) self.assertEqual(0, result[0]) self.assertEqual(5, result[5]) def test_concat_raises_index_error(self): result = ConcatDataset([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) with self.assertRaises(IndexError): # this one goes to 11 result[11] def test_add_dataset(self): d1 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7)) d2 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7)) d3 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7)) result = d1 + d2 + d3 self.assertEqual(21, len(result)) self.assertEqual(0, (d1[0][0] - result[0][0]).abs().sum()) self.assertEqual(0, (d2[0][0] - result[7][0]).abs().sum()) self.assertEqual(0, (d3[0][0] - result[14][0]).abs().sum()) def test_iterable_dataset_err(self): d1 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7)) it1 = CountingIterableDataset(5) it2 = CountingIterableDataset(10) with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"): ConcatDataset([d1, it2, it1]) with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"): ConcatDataset([it2]) with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"): ConcatDataset([it1, d1]) # takes in dummy var so this can also be used as a `worker_init_fn` def set_faulthander_if_available(_=None): faulthandler.enable(sys.__stderr__) if not IS_WINDOWS: # windows does not have faulthandler.register # chain=False prevents the default 
behavior of killing the process faulthandler.register(signal.SIGUSR1, file=sys.__stderr__, chain=False) set_faulthander_if_available() # Process `pid` must have called `set_faulthander_if_available` def print_traces_of_all_threads(pid): if not IS_WINDOWS: # use the custom signal if available os.kill(pid, signal.SIGUSR1) else: # otherwise we can still use the handler given by faulthandler.enable() # at the cost of killing the process. os.kill(pid, signal.SIGSEGV) # wait in parent process to give subprocess some time to print time.sleep(5) # The following `ErrorTrackingProcess` stores the first encountered exception in # its `.exception` attribute. # Inspired by https://stackoverflow.com/a/33599967
TestConcatDataset
python
pyqtgraph__pyqtgraph
pyqtgraph/exceptionHandling.py
{ "start": 2164, "end": 6429 }
class ____(object): def __init__(self): self.orig_sys_excepthook = sys.excepthook self.orig_threading_excepthook = threading.excepthook sys.excepthook = self.sys_excepthook threading.excepthook = self.threading_excepthook def remove(self): """Restore original exception hooks, deactivating this exception handler """ sys.excepthook = self.orig_sys_excepthook threading.excepthook = self.orig_threading_excepthook def sys_excepthook(self, *args): # sys.excepthook signature is (exc_type, exc_value, exc_traceback) args = SimpleNamespace(exc_type=args[0], exc_value=args[1], exc_traceback=args[2], thread=None) return self._excepthook(args, use_thread_hook=False) def threading_excepthook(self, args): # threading.excepthook signature is (namedtuple(exc_type, exc_value, exc_traceback, thread)) return self._excepthook(args, use_thread_hook=True) def _excepthook(self, args, use_thread_hook): ## Start by extending recursion depth just a bit. ## If the error we are catching is due to recursion, we don't want to generate another one here. recursionLimit = sys.getrecursionlimit() try: sys.setrecursionlimit(recursionLimit+100) ## call original exception handler first (prints exception) global callbacks, clear_tracebacks header = "===== %s =====" % str(time.strftime("%Y.%m.%d %H:%m:%S", time.localtime(time.time()))) try: print(header) except Exception: sys.stderr.write("Warning: stdout is broken! 
Falling back to stderr.\n") sys.stdout = sys.stderr if use_thread_hook: ret = self.orig_threading_excepthook(args) else: ret = self.orig_sys_excepthook(args.exc_type, args.exc_value, args.exc_traceback) for cb in callbacks: try: cb(args) except Exception: print(" --------------------------------------------------------------") print(" Error occurred during exception callback %s" % str(cb)) print(" --------------------------------------------------------------") traceback.print_exception(*sys.exc_info()) # deprecated callback style requiring 3 args for cb in old_callbacks: try: cb(args.exc_type, args.exc_value, args.exc_traceback) except Exception: print(" --------------------------------------------------------------") print(" Error occurred during exception callback %s" % str(cb)) print(" --------------------------------------------------------------") traceback.print_exception(*sys.exc_info()) ## Clear long-term storage of last traceback to prevent memory-hogging. ## (If an exception occurs while a lot of data is present on the stack, ## such as when loading large files, the data would ordinarily be kept ## until the next exception occurs. We would rather release this memory ## as soon as possible.) if clear_tracebacks is True: sys.last_traceback = None return ret finally: sys.setrecursionlimit(recursionLimit) def implements(self, interface=None): ## this just makes it easy for us to detect whether an ExceptionHook is already installed. if interface is None: return ['ExceptionHandler'] else: return interface == 'ExceptionHandler' ## replace built-in excepthook only if this has not already been done # TODO this will never return False; the hook is set to the bound sys_excepthook method, not the instance itself if not (hasattr(sys.excepthook, 'implements') and sys.excepthook.implements('ExceptionHandler')): handler = ExceptionHandler() original_excepthook = handler.orig_sys_excepthook original_threading_excepthook = handler.orig_threading_excepthook
ExceptionHandler
python
pandas-dev__pandas
pandas/tests/series/indexing/test_setitem.py
{ "start": 37711, "end": 39035 }
class ____(SetitemCastingEquivalents): # GH#44201 Cast to shared IntervalDtype rather than object def test_setitem_example(self): # Just a case here to make obvious what this test class is aimed at idx = IntervalIndex.from_breaks(range(4)) obj = Series(idx) val = Interval(0.5, 1.5) with pytest.raises(TypeError, match="Invalid value"): obj[0] = val @pytest.fixture def obj(self): """ Fixture to create a Series [(0, 1], (1, 2], (2, 3]] """ idx = IntervalIndex.from_breaks(range(4)) return Series(idx) @pytest.fixture def val(self): """ Fixture to get an interval (0.5, 1.5] """ return Interval(0.5, 1.5) @pytest.fixture def key(self): """ Fixture to get a key 0 """ return 0 @pytest.fixture def expected(self, obj, val): """ Fixture to get a Series [(0.5, 1.5], (1.0, 2.0], (2.0, 3.0]] """ data = [val] + list(obj[1:]) idx = IntervalIndex(data, dtype="Interval[float64]") return Series(idx) @pytest.fixture def raises(self): """ Fixture to enable raising pytest exceptions """ return True
TestSetitemFloatIntervalWithIntIntervalValues
python
bottlepy__bottle
bottle.py
{ "start": 76981, "end": 77879 }
class ____(Response, BottleException): """ A subclass of :class:`Response` that can be raised or returned from request handlers to short-curcuit request processing and override changes made to the global :data:`request` object. This bypasses error handlers, even if the status code indicates an error. Return or raise :class:`HTTPError` to trigger error handlers. """ def __init__(self, body='', status=None, headers=None, **more_headers): super(HTTPResponse, self).__init__(body, status, headers, **more_headers) def apply(self, other): """ Copy the state of this response to a different :class:`Response` object. """ other._status_code = self._status_code other._status_line = self._status_line other._headers = self._headers other._cookies = self._cookies other.body = self.body
HTTPResponse
python
wandb__wandb
wandb/sdk/internal/_generated/server_features_query.py
{ "start": 453, "end": 655 }
class ____(GQLResult): name: str is_enabled: bool = Field(alias="isEnabled") ServerFeaturesQuery.model_rebuild() ServerFeaturesQueryServerInfo.model_rebuild()
ServerFeaturesQueryServerInfoFeatures
python
jazzband__django-model-utils
tests/test_fields/test_field_tracker.py
{ "start": 12339, "end": 15309 }
class ____(FieldTrackerCommonMixin, TestCase): tracked_class: type[TrackedNotDefault | ModelTrackedNotDefault] = TrackedNotDefault instance: TrackedNotDefault | ModelTrackedNotDefault def setUp(self) -> None: self.instance = self.tracked_class() self.tracker = self.instance.name_tracker def test_pre_save_changed(self) -> None: self.assertChanged(name=None) self.instance.name = 'new age' self.assertChanged(name=None) self.instance.number = 8 self.assertChanged(name=None) self.instance.name = '' self.assertChanged(name=None) def test_first_save(self) -> None: self.assertHasChanged(name=True, number=None) self.assertPrevious(name=None, number=None) self.assertCurrent(name='') self.assertChanged(name=None) self.instance.name = 'retro' self.instance.number = 4 self.assertHasChanged(name=True, number=None) self.assertPrevious(name=None, number=None) self.assertCurrent(name='retro') self.assertChanged(name=None) def test_pre_save_has_changed(self) -> None: self.assertHasChanged(name=True, number=None) self.instance.name = 'new age' self.assertHasChanged(name=True, number=None) self.instance.number = 7 self.assertHasChanged(name=True, number=None) def test_post_save_has_changed(self) -> None: self.update_instance(name='retro', number=4) self.assertHasChanged(name=False, number=None) self.instance.name = 'new age' self.assertHasChanged(name=True, number=None) self.instance.number = 8 self.assertHasChanged(name=True, number=None) self.instance.name = 'retro' self.assertHasChanged(name=False, number=None) def test_post_save_previous(self) -> None: self.update_instance(name='retro', number=4) self.instance.name = 'new age' self.assertPrevious(name='retro', number=None) def test_post_save_changed(self) -> None: self.update_instance(name='retro', number=4) self.assertChanged() self.instance.name = 'new age' self.assertChanged(name='retro') self.instance.number = 8 self.assertChanged(name='retro') self.instance.name = 'retro' self.assertChanged() def test_current(self) -> None: 
self.assertCurrent(name='') self.instance.name = 'new age' self.assertCurrent(name='new age') self.instance.number = 8 self.assertCurrent(name='new age') self.instance.save() self.assertCurrent(name='new age') def test_update_fields(self) -> None: self.update_instance(name='retro', number=4) self.assertChanged() self.instance.name = 'new age' self.instance.number = 8 self.instance.save(update_fields=['name', 'number']) self.assertChanged()
FieldTrackedModelCustomTests
python
huggingface__transformers
src/transformers/models/bert_japanese/tokenization_bert_japanese.py
{ "start": 16684, "end": 20675 }
class ____: """Runs basic tokenization with Sudachi morphological parser.""" def __init__( self, do_lower_case=False, never_split=None, normalize_text=True, trim_whitespace=False, sudachi_split_mode="A", sudachi_config_path=None, sudachi_resource_dir=None, sudachi_dict_type="core", sudachi_projection=None, ): """ Constructs a SudachiTokenizer. Args: **do_lower_case**: (*optional*) boolean (default True) Whether to lowercase the input. **never_split**: (*optional*) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of tokens not to split. **normalize_text**: (*optional*) boolean (default True) Whether to apply unicode normalization to text before tokenization. **trim_whitespace**: (*optional*) boolean (default False) Whether to trim all whitespace, tab, newline from tokens. **sudachi_split_mode**: (*optional*) string Split mode of sudachi, choose from `["A", "B", "C"]`. **sudachi_config_path**: (*optional*) string **sudachi_resource_dir**: (*optional*) string **sudachi_dict_type**: (*optional*) string dict type of sudachi, choose from `["small", "core", "full"]`. **sudachi_projection**: (*optional*) string Word projection mode of sudachi, choose from `["surface", "normalized", "reading", "dictionary", "dictionary_and_surface", "normalized_and_surface", "normalized_nouns"]`. """ self.do_lower_case = do_lower_case self.never_split = never_split if never_split is not None else [] self.normalize_text = normalize_text self.trim_whitespace = trim_whitespace try: from sudachipy import dictionary, tokenizer except ImportError: raise ImportError( "You need to install sudachipy to use SudachiTokenizer. " "See https://github.com/WorksApplications/SudachiPy for installation." 
) if sudachi_split_mode == "A": self.split_mode = tokenizer.Tokenizer.SplitMode.A elif sudachi_split_mode == "B": self.split_mode = tokenizer.Tokenizer.SplitMode.B elif sudachi_split_mode == "C": self.split_mode = tokenizer.Tokenizer.SplitMode.C else: raise ValueError("Invalid sudachi_split_mode is specified.") self.projection = sudachi_projection sudachi_dictionary = dictionary.Dictionary( config_path=sudachi_config_path, resource_dir=sudachi_resource_dir, dict=sudachi_dict_type ) if is_sudachi_projection_available(): self.sudachi = sudachi_dictionary.create(self.split_mode, projection=self.projection) elif self.projection is not None: raise ImportError("You need to install sudachipy>=0.6.8 to specify `projection` field in sudachi_kwargs.") else: self.sudachi = sudachi_dictionary.create(self.split_mode) def tokenize(self, text, never_split=None, **kwargs): """Tokenizes a piece of text.""" if self.normalize_text: text = unicodedata.normalize("NFKC", text) never_split = self.never_split + (never_split if never_split is not None else []) tokens = [] for word in self.sudachi.tokenize(text): token = word.surface() if self.do_lower_case and token not in never_split: token = token.lower() if self.trim_whitespace: if token.strip() == "": continue else: token = token.strip() tokens.append(token) return tokens
SudachiTokenizer
python
getsentry__sentry
src/sentry/sentry_apps/api/bases/sentryapps.py
{ "start": 7521, "end": 9823 }
class ____(SentryPermission): unpublished_scope_map = { "GET": ("org:read", "org:integrations", "org:write", "org:admin"), "PUT": ("org:write", "org:admin"), "POST": ("org:admin",), # used for publishing an app "DELETE": ("org:admin",), } published_scope_map = scope_map = { "GET": PARANOID_GET, "PUT": ("org:write", "org:admin"), "POST": ("org:admin",), "DELETE": ("org:admin",), } def has_object_permission(self, request: Request, view, sentry_app: RpcSentryApp | SentryApp): if not hasattr(request, "user") or not request.user: return False owner_app = organization_service.get_organization_by_id( id=sentry_app.owner_id, user_id=request.user.id ) assert owner_app, f"owner organization for {sentry_app.name} was not found" self.determine_access(request, owner_app) if superuser_has_permission(request): return True organizations = ( user_service.get_organizations(user_id=request.user.id) if request.user.id is not None else () ) # if app is unpublished, user must be in the Org who owns the app. if not sentry_app.is_published: if not any(sentry_app.owner_id == org.id for org in organizations): raise SentryAppError( message="User must be in the app owner's organization for unpublished apps", status_code=403, public_context={ "user_organizations": [org.slug for org in organizations], }, ) # TODO(meredith): make a better way to allow for public # endpoints. we can't use ensure_scoped_permission now # that the public endpoint isn't denoted by '()' if sentry_app.is_published and request.method == "GET": return True return ensure_scoped_permission( request, self._scopes_for_sentry_app(sentry_app).get(request.method) ) def _scopes_for_sentry_app(self, sentry_app): if sentry_app.is_published: return self.published_scope_map else: return self.unpublished_scope_map
SentryAppPermission
python
sympy__sympy
sympy/functions/elementary/trigonometric.py
{ "start": 64414, "end": 67706 }
class ____(DefinedFunction): r""" Represents an unnormalized sinc function: .. math:: \operatorname{sinc}(x) = \begin{cases} \frac{\sin x}{x} & \qquad x \neq 0 \\ 1 & \qquad x = 0 \end{cases} Examples ======== >>> from sympy import sinc, oo, jn >>> from sympy.abc import x >>> sinc(x) sinc(x) * Automated Evaluation >>> sinc(0) 1 >>> sinc(oo) 0 * Differentiation >>> sinc(x).diff() cos(x)/x - sin(x)/x**2 * Series Expansion >>> sinc(x).series() 1 - x**2/6 + x**4/120 + O(x**6) * As zero'th order spherical Bessel Function >>> sinc(x).rewrite(jn) jn(0, x) See also ======== sympy.functions.elementary.trigonometric.sin References ========== .. [1] https://en.wikipedia.org/wiki/Sinc_function """ _singularities = (S.ComplexInfinity,) def fdiff(self, argindex=1): x = self.args[0] if argindex == 1: # We would like to return the Piecewise here, but Piecewise.diff # currently can't handle removable singularities, meaning things # like sinc(x).diff(x, 2) give the wrong answer at x = 0. See # https://github.com/sympy/sympy/issues/11402. 
# # return Piecewise(((x*cos(x) - sin(x))/x**2, Ne(x, S.Zero)), (S.Zero, S.true)) return cos(x)/x - sin(x)/x**2 else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, arg): if arg.is_zero: return S.One if arg.is_Number: if arg in [S.Infinity, S.NegativeInfinity]: return S.Zero elif arg is S.NaN: return S.NaN if arg is S.ComplexInfinity: return S.NaN if arg.could_extract_minus_sign(): return cls(-arg) pi_coeff = _pi_coeff(arg) if pi_coeff is not None: if pi_coeff.is_integer: if fuzzy_not(arg.is_zero): return S.Zero elif (2*pi_coeff).is_integer: return S.NegativeOne**(pi_coeff - S.Half)/arg def _eval_nseries(self, x, n, logx, cdir=0): x = self.args[0] return (sin(x)/x)._eval_nseries(x, n, logx) def _eval_rewrite_as_jn(self, arg, **kwargs): from sympy.functions.special.bessel import jn return jn(0, arg) def _eval_rewrite_as_sin(self, arg, **kwargs): return Piecewise((sin(arg)/arg, Ne(arg, S.Zero)), (S.One, S.true)) def _eval_is_zero(self): if self.args[0].is_infinite: return True rest, pi_mult = _peeloff_pi(self.args[0]) if rest.is_zero: return fuzzy_and([pi_mult.is_integer, pi_mult.is_nonzero]) if rest.is_Number and pi_mult.is_integer: return False def _eval_is_real(self): if self.args[0].is_extended_real or self.args[0].is_imaginary: return True _eval_is_finite = _eval_is_real ############################################################################### ########################### TRIGONOMETRIC INVERSES ############################ ###############################################################################
sinc
python
run-llama__llama_index
llama-index-core/llama_index/core/evaluation/dataset_generation.py
{ "start": 3386, "end": 12351 }
class ____(PromptMixin): """Generate dataset (question/ question-answer pairs) \ based on the given documents. NOTE: this is a beta feature, subject to change! Args: nodes (List[Node]): List of nodes. (Optional) llm (LLM): Language model. callback_manager (CallbackManager): Callback manager. num_questions_per_chunk: number of question to be \ generated per chunk. Each document is chunked of size 512 words. text_question_template: Question generation template. question_gen_query: Question generation query. """ def __init__( self, nodes: List[BaseNode], llm: Optional[LLM] = None, callback_manager: Optional[CallbackManager] = None, num_questions_per_chunk: int = 10, text_question_template: BasePromptTemplate | None = None, text_qa_template: BasePromptTemplate | None = None, question_gen_query: str | None = None, metadata_mode: MetadataMode = MetadataMode.NONE, show_progress: bool = False, ) -> None: """Init params.""" self.llm = llm or Settings.llm self.callback_manager = callback_manager or Settings.callback_manager self.text_question_template = text_question_template or PromptTemplate( DEFAULT_QUESTION_GENERATION_PROMPT ) self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT self.question_gen_query = ( question_gen_query or f"You are a Teacher/Professor. Your task is to setup \ {num_questions_per_chunk} questions for an upcoming \ quiz/examination. The questions should be diverse in nature \ across the document. Restrict the questions to the \ context information provided." 
) self.nodes = nodes self._metadata_mode = metadata_mode self._show_progress = show_progress @classmethod def from_documents( cls, documents: List[Document], llm: Optional[LLM] = None, transformations: Optional[List[TransformComponent]] = None, callback_manager: Optional[CallbackManager] = None, num_questions_per_chunk: int = 10, text_question_template: BasePromptTemplate | None = None, text_qa_template: BasePromptTemplate | None = None, question_gen_query: str | None = None, required_keywords: List[str] | None = None, exclude_keywords: List[str] | None = None, show_progress: bool = False, ) -> DatasetGenerator: """Generate dataset from documents.""" llm = llm or Settings.llm transformations = transformations or Settings.transformations callback_manager = callback_manager or Settings.callback_manager nodes = run_transformations( documents, transformations, show_progress=show_progress ) # use node postprocessor to filter nodes required_keywords = required_keywords or [] exclude_keywords = exclude_keywords or [] node_postprocessor = KeywordNodePostprocessor( callback_manager=callback_manager, required_keywords=required_keywords, exclude_keywords=exclude_keywords, ) node_with_scores = [NodeWithScore(node=node) for node in nodes] node_with_scores = node_postprocessor.postprocess_nodes(node_with_scores) nodes = [node_with_score.node for node_with_score in node_with_scores] return cls( nodes=nodes, llm=llm, callback_manager=callback_manager, num_questions_per_chunk=num_questions_per_chunk, text_question_template=text_question_template, text_qa_template=text_qa_template, question_gen_query=question_gen_query, show_progress=show_progress, ) async def _agenerate_dataset( self, nodes: List[BaseNode], num: int | None = None, generate_response: bool = False, ) -> QueryResponseDataset: """Node question generator.""" query_tasks: List[Coroutine] = [] queries: Dict[str, str] = {} responses_dict: Dict[str, str] = {} if self._show_progress: from tqdm.asyncio import tqdm_asyncio 
async_module = tqdm_asyncio else: async_module = asyncio summary_indices: List[SummaryIndex] = [] for node in nodes: if num is not None and len(query_tasks) >= num: break index = SummaryIndex.from_documents( [ Document( text=node.get_content(metadata_mode=self._metadata_mode), metadata=node.metadata, # type: ignore ) ], callback_manager=self.callback_manager, ) query_engine = index.as_query_engine( llm=self.llm, text_qa_template=self.text_question_template, use_async=True, ) task = query_engine.aquery( self.question_gen_query, ) query_tasks.append(task) summary_indices.append(index) responses = await async_module.gather(*query_tasks) for idx, response in enumerate(responses): result = str(response).strip().split("\n") cleaned_questions = [ re.sub(r"^\d+[\).\s]", "", question).strip() for question in result ] cleaned_questions = [ question for question in cleaned_questions if len(question) > 0 ] cur_queries = { str(uuid.uuid4()): question for question in cleaned_questions } queries.update(cur_queries) if generate_response: index = summary_indices[idx] qr_tasks = [] cur_query_items = list(cur_queries.items()) cur_query_keys = [query_id for query_id, _ in cur_query_items] for query_id, query in cur_query_items: qa_query_engine = index.as_query_engine( llm=self.llm, text_qa_template=self.text_qa_template, ) qr_task = qa_query_engine.aquery(query) qr_tasks.append(qr_task) qr_responses = await async_module.gather(*qr_tasks) for query_id, qa_response in zip(cur_query_keys, qr_responses): responses_dict[query_id] = str(qa_response) else: pass query_ids = list(queries.keys()) if num is not None: query_ids = query_ids[:num] # truncate queries, responses to the subset of query ids queries = {query_id: queries[query_id] for query_id in query_ids} if generate_response: responses_dict = { query_id: responses_dict[query_id] for query_id in query_ids } return QueryResponseDataset(queries=queries, responses=responses_dict) async def agenerate_questions_from_nodes(self, num: int | 
None = None) -> List[str]: """Generates questions for each document.""" dataset = await self._agenerate_dataset( self.nodes, num=num, generate_response=False ) return dataset.questions async def agenerate_dataset_from_nodes( self, num: int | None = None ) -> QueryResponseDataset: """Generates questions for each document.""" return await self._agenerate_dataset( self.nodes, num=num, generate_response=True ) def generate_questions_from_nodes(self, num: int | None = None) -> List[str]: """Generates questions for each document.""" return asyncio_run(self.agenerate_questions_from_nodes(num=num)) def generate_dataset_from_nodes( self, num: int | None = None ) -> QueryResponseDataset: """Generates questions for each document.""" return asyncio_run(self.agenerate_dataset_from_nodes(num=num)) def _get_prompts(self) -> PromptDictType: """Get prompts.""" return { "text_question_template": self.text_question_template, "text_qa_template": self.text_qa_template, } def _get_prompt_modules(self) -> PromptMixinType: """Get prompt modules.""" return {} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" if "text_question_template" in prompts: self.text_question_template = prompts["text_question_template"] if "text_qa_template" in prompts: self.text_qa_template = prompts["text_qa_template"]
DatasetGenerator
python
neetcode-gh__leetcode
python/0146-lru-cache.py
{ "start": 0, "end": 123 }
class Node:
    """Doubly-linked-list node holding one LRU-cache entry."""

    def __init__(self, key, val):
        # Keep the key on the node so eviction from the tail can also
        # delete the matching hash-map entry.
        self.key = key
        self.val = val
        # Neighbour links; a node is unlinked until inserted in the list.
        self.prev = None
        self.next = None
Node
python
apache__thrift
lib/py/src/server/TNonblockingServer.py
{ "start": 2671, "end": 2926 }
class Message(object):
    """One framed message inside the non-blocking server's I/O buffer.

    Records where the message starts, how many bytes it spans, and
    whether it is the length header that precedes a frame body.
    """

    def __init__(self, offset, len_, header):
        self.offset = offset
        self.len = len_
        # Payload bytes are attached later, after the read completes.
        self.buffer = None
        self.is_header = header

    @property
    def end(self):
        """Index one past the last byte of this message."""
        return self.len + self.offset
Message
python
mlflow__mlflow
mlflow/protos/unity_catalog_prompt_service_pb2_grpc.py
{ "start": 8135, "end": 19975 }
class ____(object): """Missing associated documentation comment in .proto file.""" def CreatePrompt(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def UpdatePrompt(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeletePrompt(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetPrompt(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SearchPrompts(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def CreatePromptVersion(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def UpdatePromptVersion(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeletePromptVersion(self, request, context): """Missing associated documentation comment in .proto file.""" 
context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetPromptVersion(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SearchPromptVersions(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SetPromptAlias(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeletePromptAlias(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetPromptVersionByAlias(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SetPromptTag(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeletePromptTag(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SetPromptVersionTag(self, request, 
context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeletePromptVersionTag(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_UnityCatalogPromptServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'CreatePrompt': grpc.unary_unary_rpc_method_handler( servicer.CreatePrompt, request_deserializer=unity__catalog__prompt__messages__pb2.CreatePromptRequest.FromString, response_serializer=unity__catalog__prompt__messages__pb2.CreatePromptResponse.SerializeToString, ), 'UpdatePrompt': grpc.unary_unary_rpc_method_handler( servicer.UpdatePrompt, request_deserializer=unity__catalog__prompt__messages__pb2.UpdatePromptRequest.FromString, response_serializer=unity__catalog__prompt__messages__pb2.UpdatePromptResponse.SerializeToString, ), 'DeletePrompt': grpc.unary_unary_rpc_method_handler( servicer.DeletePrompt, request_deserializer=unity__catalog__prompt__messages__pb2.DeletePromptRequest.FromString, response_serializer=unity__catalog__prompt__messages__pb2.DeletePromptResponse.SerializeToString, ), 'GetPrompt': grpc.unary_unary_rpc_method_handler( servicer.GetPrompt, request_deserializer=unity__catalog__prompt__messages__pb2.GetPromptRequest.FromString, response_serializer=unity__catalog__prompt__messages__pb2.GetPromptResponse.SerializeToString, ), 'SearchPrompts': grpc.unary_unary_rpc_method_handler( servicer.SearchPrompts, request_deserializer=unity__catalog__prompt__messages__pb2.SearchPromptsRequest.FromString, response_serializer=unity__catalog__prompt__messages__pb2.SearchPromptsResponse.SerializeToString, ), 'CreatePromptVersion': grpc.unary_unary_rpc_method_handler( 
servicer.CreatePromptVersion, request_deserializer=unity__catalog__prompt__messages__pb2.CreatePromptVersionRequest.FromString, response_serializer=unity__catalog__prompt__messages__pb2.CreatePromptVersionResponse.SerializeToString, ), 'UpdatePromptVersion': grpc.unary_unary_rpc_method_handler( servicer.UpdatePromptVersion, request_deserializer=unity__catalog__prompt__messages__pb2.UpdatePromptVersionRequest.FromString, response_serializer=unity__catalog__prompt__messages__pb2.UpdatePromptVersionResponse.SerializeToString, ), 'DeletePromptVersion': grpc.unary_unary_rpc_method_handler( servicer.DeletePromptVersion, request_deserializer=unity__catalog__prompt__messages__pb2.DeletePromptVersionRequest.FromString, response_serializer=unity__catalog__prompt__messages__pb2.DeletePromptVersionResponse.SerializeToString, ), 'GetPromptVersion': grpc.unary_unary_rpc_method_handler( servicer.GetPromptVersion, request_deserializer=unity__catalog__prompt__messages__pb2.GetPromptVersionRequest.FromString, response_serializer=unity__catalog__prompt__messages__pb2.GetPromptVersionResponse.SerializeToString, ), 'SearchPromptVersions': grpc.unary_unary_rpc_method_handler( servicer.SearchPromptVersions, request_deserializer=unity__catalog__prompt__messages__pb2.SearchPromptVersionsRequest.FromString, response_serializer=unity__catalog__prompt__messages__pb2.SearchPromptVersionsResponse.SerializeToString, ), 'SetPromptAlias': grpc.unary_unary_rpc_method_handler( servicer.SetPromptAlias, request_deserializer=unity__catalog__prompt__messages__pb2.SetPromptAliasRequest.FromString, response_serializer=unity__catalog__prompt__messages__pb2.SetPromptAliasResponse.SerializeToString, ), 'DeletePromptAlias': grpc.unary_unary_rpc_method_handler( servicer.DeletePromptAlias, request_deserializer=unity__catalog__prompt__messages__pb2.DeletePromptAliasRequest.FromString, response_serializer=unity__catalog__prompt__messages__pb2.DeletePromptAliasResponse.SerializeToString, ), 
'GetPromptVersionByAlias': grpc.unary_unary_rpc_method_handler( servicer.GetPromptVersionByAlias, request_deserializer=unity__catalog__prompt__messages__pb2.GetPromptVersionByAliasRequest.FromString, response_serializer=unity__catalog__prompt__messages__pb2.GetPromptVersionByAliasResponse.SerializeToString, ), 'SetPromptTag': grpc.unary_unary_rpc_method_handler( servicer.SetPromptTag, request_deserializer=unity__catalog__prompt__messages__pb2.SetPromptTagRequest.FromString, response_serializer=unity__catalog__prompt__messages__pb2.SetPromptTagResponse.SerializeToString, ), 'DeletePromptTag': grpc.unary_unary_rpc_method_handler( servicer.DeletePromptTag, request_deserializer=unity__catalog__prompt__messages__pb2.DeletePromptTagRequest.FromString, response_serializer=unity__catalog__prompt__messages__pb2.DeletePromptTagResponse.SerializeToString, ), 'SetPromptVersionTag': grpc.unary_unary_rpc_method_handler( servicer.SetPromptVersionTag, request_deserializer=unity__catalog__prompt__messages__pb2.SetPromptVersionTagRequest.FromString, response_serializer=unity__catalog__prompt__messages__pb2.SetPromptVersionTagResponse.SerializeToString, ), 'DeletePromptVersionTag': grpc.unary_unary_rpc_method_handler( servicer.DeletePromptVersionTag, request_deserializer=unity__catalog__prompt__messages__pb2.DeletePromptVersionTagRequest.FromString, response_serializer=unity__catalog__prompt__messages__pb2.DeletePromptVersionTagResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'mlflow.unitycatalog.UnityCatalogPromptService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) server.add_registered_method_handlers('mlflow.unitycatalog.UnityCatalogPromptService', rpc_method_handlers) # This class is part of an EXPERIMENTAL API.
UnityCatalogPromptServiceServicer
python
google__jax
jax/experimental/jax2tf/tests/converters.py
{ "start": 883, "end": 1998 }
class Converter:
    """Descriptor for one JAX-model conversion path exercised by the tests.

    NOTE(review): declared with bare annotated fields and defaults —
    presumably a ``@dataclasses.dataclass`` decorator is applied just
    above this snippet; confirm in the full file.
    """

    name: str
    convert_fn: Callable[..., Any]
    # Some converters (e.g. TFjs) are only checked for convertibility,
    # not for numerical agreement with the JAX reference.
    compare_numerics: bool = True


def jax2tf_convert(harness: ModelHarness, enable_xla: bool = True):
    """Convert the harness's apply function with jax2tf, optionally without XLA ops."""
    return jax2tf.convert(
        harness.apply_with_vars,
        enable_xla=enable_xla,
        polymorphic_shapes=harness.polymorphic_shapes)


def jax2tfjs(harness: ModelHarness):
    """Converts the given `test_case` using the TFjs converter."""
    # The TFjs converter writes a model directory; use a throwaway one
    # since only successful conversion matters here.
    with tempfile.TemporaryDirectory() as model_dir:
        tfjs.converters.convert_jax(
            apply_fn=harness.apply,
            params=harness.variables,
            input_signatures=harness.tf_input_signature,
            polymorphic_shapes=harness.polymorphic_shapes,
            model_dir=model_dir)


ALL_CONVERTERS = [
    # jax2tf with XLA support (enable_xla=True).
    Converter(name='jax2tf_xla', convert_fn=jax2tf_convert),
    # jax2tf without XLA support (enable_xla=False).
    Converter(
        name='jax2tf_noxla',
        convert_fn=functools.partial(jax2tf_convert, enable_xla=False),
    ),
    # Convert JAX to Tensorflow.JS.
    Converter(name='jax2tfjs', convert_fn=jax2tfjs, compare_numerics=False),
]
Converter
python
redis__redis-py
redis/asyncio/multidb/database.py
{ "start": 308, "end": 1083 }
class AsyncDatabase(AbstractDatabase):
    """Database with an underlying asynchronous redis client.

    Abstract: concrete subclasses must expose both the redis client and
    the circuit breaker as read/write properties.
    """

    @property
    @abstractmethod
    def client(self) -> Union[Redis, RedisCluster]:
        """The underlying redis client."""
        pass

    @client.setter
    @abstractmethod
    def client(self, client: Union[Redis, RedisCluster]):
        """Set the underlying redis client."""
        pass

    @property
    @abstractmethod
    def circuit(self) -> CircuitBreaker:
        """Circuit breaker for the current database."""
        pass

    @circuit.setter
    @abstractmethod
    def circuit(self, circuit: CircuitBreaker):
        """Set the circuit breaker for the current database."""
        pass


# Type alias: an ordered list of (database, weight) pairs; presumably
# used for weighted failover selection — confirm against callers.
Databases = WeightedList[tuple[AsyncDatabase, Number]]
AsyncDatabase
python
doocs__leetcode
lcci/03.01.Three in One/Solution.py
{ "start": 0, "end": 1118 }
class TripleInOne:
    """Three fixed-capacity stacks multiplexed over a single flat list.

    Stack ``i`` occupies slots ``[i*cap, (i+1)*cap)``; the final three
    slots of the list hold the current size of each stack.
    """

    def __init__(self, stackSize: int):
        self.cap = stackSize
        self.stk = [0] * (3 * self.cap + 3)

    def _size_slot(self, stackNum: int) -> int:
        # Index of the counter cell that tracks this stack's size.
        return 3 * self.cap + stackNum

    def push(self, stackNum: int, value: int) -> None:
        size = self.stk[self._size_slot(stackNum)]
        if size >= self.cap:
            # Stack is full: silently drop the value (matches the spec).
            return
        self.stk[self.cap * stackNum + size] = value
        self.stk[self._size_slot(stackNum)] = size + 1

    def pop(self, stackNum: int) -> int:
        if self.isEmpty(stackNum):
            return -1
        self.stk[self._size_slot(stackNum)] -= 1
        size = self.stk[self._size_slot(stackNum)]
        return self.stk[self.cap * stackNum + size]

    def peek(self, stackNum: int) -> int:
        if self.isEmpty(stackNum):
            return -1
        size = self.stk[self._size_slot(stackNum)]
        return self.stk[self.cap * stackNum + size - 1]

    def isEmpty(self, stackNum: int) -> bool:
        return self.stk[self._size_slot(stackNum)] == 0


# Your TripleInOne object will be instantiated and called as such:
# obj = TripleInOne(stackSize)
# obj.push(stackNum,value)
# param_2 = obj.pop(stackNum)
# param_3 = obj.peek(stackNum)
# param_4 = obj.isEmpty(stackNum)
TripleInOne
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/decorator7.py
{ "start": 329, "end": 493 }
class ValueDecorated(Generic[T]):
    """Callable wrapper that always returns one captured value of type ``T``."""

    def __init__(self, value: T) -> None:
        # Captured once at construction; every call yields this object.
        self._value: T = value

    def __call__(self) -> T:
        """Return the wrapped value."""
        return self._value
ValueDecorated
python
numpy__numpy
numpy/linalg/tests/test_linalg.py
{ "start": 14308, "end": 14939 }
class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
    # kept apart from TestSolve for use for testing with matrices.

    def do(self, a, b, tags):
        """Verify that x = linalg.solve(a, b) satisfies a @ x == b.

        Also checks that the result preserves the subclass of ``b``.
        """
        x = linalg.solve(a, b)
        if np.array(b).ndim == 1:
            # When a is (..., M, M) and b is (M,), it is the same as when b is
            # (M, 1), except the result has shape (..., M)
            adotx = matmul(a, x[..., None])[..., 0]
            assert_almost_equal(np.broadcast_to(b, adotx.shape), adotx)
        else:
            adotx = matmul(a, x)
            assert_almost_equal(b, adotx)
        assert_(consistent_subclass(x, b))
SolveCases
python
pytorch__pytorch
torch/distributed/checkpoint/_async_executor.py
{ "start": 361, "end": 1209 }
class _AsyncCheckpointExecutor(abc.ABC):
    """Abstract interface for executing checkpoint saves off the caller's thread."""

    @abc.abstractmethod
    def execute_save(
        self,
        staging_future_or_state_dict: Union[STATE_DICT_TYPE, Future[STATE_DICT_TYPE]],
        *,
        checkpoint_id: Union[str, os.PathLike, None] = None,
        storage_writer: Optional[StorageWriter] = None,
        planner: Optional[SavePlanner] = None,
        process_group: Optional[dist.ProcessGroup] = None,
        no_dist: bool = False,
        use_collectives: bool = True,
    ) -> Future:
        """
        Execute the checkpoint save request asynchronously.

        This method is intended to be used as an abstraction for
        implementing async checkpointing. The actual checkpoint save
        operation is executed in a separate thread or process depending
        on the implementation of this interface.
        """
_AsyncCheckpointExecutor
python
django-import-export__django-import-export
tests/core/tests/test_resources/test_modelresource/test_resource_fields.py
{ "start": 6308, "end": 8459 }
class ModelResourceUnusedFieldWarnings(TestCase):
    """
    Model Resources should warn on ignored declared fields, but only if
    they are declared on the current class.

    Ref: #2017
    """

    class _BaseBookResource(resources.ModelResource):
        # Base resource declaring three fields, all whitelisted in Meta,
        # so the base class itself produces no warnings.
        name = fields.Field(
            attribute="name",
        )
        imported = fields.Field(
            attribute="imported",
        )
        price = fields.Field(
            attribute="price",
        )

        class Meta:
            model = Book
            fields = (
                "name",
                "imported",
                "price",
            )

    def setUp(self):
        # Enable warnings for this test
        warnings.simplefilter("default")

    def test_no_warnings_defined_fields(self):
        """
        A subclass that lists a subset of the parents defined fields
        should receive no warnings.
        """
        with warnings.catch_warnings(record=True) as w:

            class _Export1BookResource(self._BaseBookResource):
                class Meta:
                    fields = (
                        "name",
                        "imported",
                    )

            # expect no warnings
            self.assertEqual(len(w), 0)

    def test_declared_field_expect_warning(self):
        """
        A class that defines a field, but doesn't list it in fields
        should receive a warning with the name of the field and the class name
        """
        with warnings.catch_warnings(record=True) as w:

            class _Export2BookResource(self._BaseBookResource):
                # 'published' is declared here but absent from Meta.fields,
                # so it should trigger exactly one UserWarning below.
                published = fields.Field(attribute="published")
                published_time = fields.Field(attribute="published_time")

                class Meta:
                    fields = ("author_email", "imported", "published_time")

            self.assertEqual(len(w), 1)
            self.assertIs(w[0].category, UserWarning)
            self.assertIn(
                "_Export2BookResource: ignoring field 'published' because not"
                " declared in 'fields' whitelist",
                str(w[0].message),
            )
ModelResourceUnusedFieldWarnings
python
sphinx-doc__sphinx
sphinx/builders/latex/nodes.py
{ "start": 526, "end": 659 }
class math_reference(nodes.Inline, nodes.Referential, nodes.TextElement):
    """Docutils node representing a cross-reference to an equation."""
math_reference
python
pytorch__pytorch
test/cpp/jit/tests_setup.py
{ "start": 1361, "end": 2608 }
class TorchSaveJitStream_CUDA(FileSetup):
    """Fixture that scripts and saves a model exercising CUDA streams."""

    # Path the scripted model is saved to; consumed by the C++ JIT tests.
    path = "saved_stream_model.pt"

    def setup(self):
        # Without CUDA the fixture cannot be produced; the corresponding
        # C++ test is presumably skipped too — confirm on the C++ side.
        if not torch.cuda.is_available():
            return

        class Model(torch.nn.Module):
            def forward(self):
                s = torch.cuda.Stream()
                a = torch.rand(3, 4, device="cuda")
                b = torch.rand(3, 4, device="cuda")
                with torch.cuda.stream(s):
                    # Verify the context manager actually made `s` current.
                    is_stream_s = (
                        torch.cuda.current_stream(s.device_index()).id()
                        == s.id()
                    )
                    c = torch.cat((a, b), 0).to("cuda")
                s.synchronize()
                return is_stream_s, a, b, c

        model = Model()

        # Script the model and save
        script_model = torch.jit.script(model)
        torch.jit.save(script_model, self.path)


# All fixtures the C++ JIT tests expect on disk.
tests = [
    EvalModeForLoadedModule(),
    SerializationInterop(),
    TorchSaveError(),
    TorchSaveJitStream_CUDA(),
]


def setup():
    """Create every test fixture."""
    for test in tests:
        test.setup()


def shutdown():
    """Remove every test fixture."""
    for test in tests:
        test.shutdown()


if __name__ == "__main__":
    # Invoked by the C++ test harness as `script.py setup|shutdown`.
    command = sys.argv[1]
    if command == "setup":
        setup()
    elif command == "shutdown":
        shutdown()
TorchSaveJitStream_CUDA
python
scipy__scipy
scipy/signal/tests/test_filter_design.py
{ "start": 30105, "end": 44831 }
class ____: def test_ticket1441(self, xp): """Regression test for ticket 1441.""" # Because freqz previously used arange instead of linspace, # when N was large, it would return one more point than # requested. N = 100000 w, h = freqz(xp.asarray([1.0]), worN=N) assert w.shape == (N,) def test_gh_22886(self, xp): w, h = freqz(xp.asarray([1.]), worN=xp.asarray([0., 0.1])) xp_assert_equal(w, xp.asarray([0. , 0.1])) xp_assert_equal(h, xp.asarray([1.+0.j, 1.+0.j])) def test_basic(self, xp): w, h = freqz(xp.asarray([1.0]), worN=8) assert_array_almost_equal(w, xp.pi * xp.arange(8, dtype=w.dtype) / 8.) assert_array_almost_equal(h, xp.ones(8)) w, h = freqz(xp.asarray([1.0]), worN=9) assert_array_almost_equal(w, xp.pi * xp.arange(9, dtype=w.dtype) / 9.) assert_array_almost_equal(h, xp.ones(9)) for a in [1, xp.ones(2)]: w, h = freqz(xp.ones(2), a, worN=0) assert w.shape == (0,) assert h.shape == (0,) hdt = xp.complex128 if xp_default_dtype(xp) == xp.float64 else xp.complex64 assert h.dtype == hdt def test_basic2(self, xp): t = xp.linspace(0, 1, 4, endpoint=False) for b, a, h_whole in zip( (xp.asarray([1., 0, 0, 0]), xp.sin(2 * xp.pi * t)), (xp.asarray([1., 0, 0, 0]), xp.asarray([0.5, 0, 0, 0])), (xp.asarray([1., 1., 1., 1.]), xp.asarray([0, -4j, 0, 4j])) ): w, h = freqz(b, a, worN=4, whole=True) expected_w = xp.linspace(0, 2 * xp.pi, 4, endpoint=False) assert_array_almost_equal(w, expected_w) assert_array_almost_equal(h, h_whole) if is_numpy(xp): # simultaneously check int-like support w, h = freqz(b, a, worN=np.int32(4), whole=True) assert_array_almost_equal(w, expected_w) assert_array_almost_equal(h, h_whole) w, h = freqz(b, a, worN=w, whole=True) assert_array_almost_equal(w, expected_w) assert_array_almost_equal(h, h_whole) def test_basic3(self): t = np.linspace(0, 1, 4, endpoint=False) expected_w = np.linspace(0, 2 * np.pi, 4, endpoint=False) for b, a, h_whole in zip( (np.asarray([1., 0, 0, 0]), np.sin(2 * np.pi * t)), (np.asarray([1., 0, 0, 0]), np.asarray([0.5, 0, 0, 
0])), (np.asarray([1., 1., 1., 1.]), np.asarray([0, -4j, 0, 4j])) ): w, h = freqz(b, a, worN=np.int32(4), whole=True) assert_array_almost_equal(w, expected_w) assert_array_almost_equal(h, h_whole) w, h = freqz(b, a, worN=w, whole=True) assert_array_almost_equal(w, expected_w) assert_array_almost_equal(h, h_whole) def test_basic_whole(self, xp): w, h = freqz(xp.asarray([1.0]), worN=8, whole=True) assert_array_almost_equal(w, 2 * xp.pi * xp.arange(8.0) / 8) assert_array_almost_equal(h, xp.ones(8)) def test_plot(self, xp): def plot(w, h): assert_array_almost_equal(w, xp.pi * xp.arange(8.0) / 8) assert_array_almost_equal(h, xp.ones(8)) with assert_raises(ZeroDivisionError): freqz(xp.asarray([1.0]), worN=8, plot=lambda w, h: 1 / 0) freqz(xp.asarray([1.0]), worN=8, plot=plot) def test_fft_wrapping(self, xp): # Some simple real FIR filters bs = list() # filters as_ = list() hs_whole = list() hs_half = list() # 3 taps t = xp.linspace(0, 1, 3, endpoint=False) bs.append(xp.sin(2 * xp.pi * t)) as_.append(3.) 
hs_whole.append(xp.asarray([0, -0.5j, 0.5j])) hs_half.append(xp.asarray([0, math.sqrt(1./12.), -0.5j])) # 4 taps t = xp.linspace(0, 1, 4, endpoint=False) bs.append(xp.sin(2 * xp.pi * t)) as_.append(0.5) hs_whole.append(xp.asarray([0, -4j, 0, 4j])) hs_half.append(xp.asarray([0, math.sqrt(8), -4j, -math.sqrt(8)])) del t for ii, b in enumerate(bs): # whole a = as_[ii] expected_w = xp.linspace(0, 2 * xp.pi, b.shape[0], endpoint=False) w, h = freqz(b, a, worN=expected_w, whole=True) # polyval err_msg = f'b = {b}, a={a}' assert_array_almost_equal(w, expected_w, err_msg=err_msg) assert_array_almost_equal(h, hs_whole[ii], err_msg=err_msg) w, h = freqz(b, a, worN=b.shape[0], whole=True) # FFT assert_array_almost_equal(w, expected_w, err_msg=err_msg) assert_array_almost_equal(h, hs_whole[ii], err_msg=err_msg) # non-whole expected_w = xp.linspace(0, xp.pi, b.shape[0], endpoint=False) w, h = freqz(b, a, worN=expected_w, whole=False) # polyval assert_array_almost_equal(w, expected_w, err_msg=err_msg) assert_array_almost_equal(h, hs_half[ii], err_msg=err_msg) w, h = freqz(b, a, worN=b.shape[0], whole=False) # FFT assert_array_almost_equal(w, expected_w, err_msg=err_msg) assert_array_almost_equal(h, hs_half[ii], err_msg=err_msg) # some random FIR filters (real + complex) # assume polyval is accurate rng = np.random.RandomState(0) for ii in range(2, 10): # number of taps b = xp.asarray(rng.randn(ii)) for kk in range(2): a = xp.asarray(rng.randn(1) if kk == 0 else rng.randn(3)) for jj in range(2): if jj == 1: b = b + xp.asarray(rng.randn(ii)) * 1j # whole expected_w = xp.linspace(0, 2 * xp.pi, ii, endpoint=False) w, expected_h = freqz(b, a, worN=expected_w, whole=True) assert_array_almost_equal(w, expected_w) w, h = freqz(b, a, worN=ii, whole=True) assert_array_almost_equal(w, expected_w) assert_array_almost_equal(h, expected_h, decimal=4) # half expected_w = xp.linspace(0, xp.pi, ii, endpoint=False) w, expected_h = freqz(b, a, worN=expected_w, whole=False) 
assert_array_almost_equal(w, expected_w) w, h = freqz(b, a, worN=ii, whole=False) assert_array_almost_equal(w, expected_w) assert_array_almost_equal(h, expected_h, decimal=4) def test_broadcasting1(self, xp): # Test broadcasting with worN an integer or a 1-D array, # b and a are n-dimensional arrays. np.random.seed(123) b = np.random.rand(3, 5, 1) a = np.random.rand(2, 1) b, a = map(xp.asarray, (b, a)) for whole in [False, True]: # Test with worN being integers (one fast for FFT and one not), # a 1-D array, and an empty array. for worN in [16, 17, xp.linspace(0, 1, 10), xp.asarray([])]: w, h = freqz(b, a, worN=worN, whole=whole) for k in range(b.shape[1]): bk = b[:, k, 0] ak = a[:, 0] ww, hh = freqz(bk, ak, worN=worN, whole=whole) xp_assert_close(ww, w) xp_assert_close(hh, h[k, ...]) def test_broadcasting2(self, xp): # Test broadcasting with worN an integer or a 1-D array, # b is an n-dimensional array, and a is left at the default value. np.random.seed(123) b = np.random.rand(3, 5, 1) b = xp.asarray(b) for whole in [False, True]: for worN in [16, 17, xp.linspace(0, 1, 10)]: w, h = freqz(b, worN=worN, whole=whole) for k in range(b.shape[1]): bk = b[:, k, 0] ww, hh = freqz(bk, worN=worN, whole=whole) xp_assert_close(ww, w) xp_assert_close(hh, h[k, :]) def test_broadcasting3(self, xp): # Test broadcasting where b.shape[-1] is the same length # as worN, and a is left at the default value. np.random.seed(123) N = 16 b = np.random.rand(3, N) b = xp.asarray(b) for whole in [False, True]: for worN in [N, xp.linspace(0, 1, N)]: w, h = freqz(b, worN=worN, whole=whole) assert xp_size(w) == N for k in range(N): bk = b[:, k] ww, hh = freqz(bk, worN=w[k], whole=whole) xp_assert_close(ww, xp.asarray(w[k])[None]) xp_assert_close(hh, xp.asarray(h[k])[None]) def test_broadcasting4(self, xp): # Test broadcasting with worN a 2-D array. 
np.random.seed(123) b = np.random.rand(4, 2, 1, 1) a = np.random.rand(5, 2, 1, 1) b, a = map(xp.asarray, (b, a)) for whole in [False, True]: for worN in [np.random.rand(6, 7), np.empty((6, 0))]: worN = xp.asarray(worN) w, h = freqz(b, a, worN=worN, whole=whole) xp_assert_close(w, worN, rtol=1e-14) assert h.shape == (2,) + worN.shape for k in range(2): ww, hh = freqz(b[:, k, 0, 0], a[:, k, 0, 0], worN=xp.reshape(worN, (-1,)), whole=whole) xp_assert_close(ww, xp.reshape(worN, (-1,)), rtol=1e-14) xp_assert_close(hh, xp.reshape(h[k, :, :], (-1,))) def test_backward_compat(self): # For backward compatibility, test if None act as a wrapper for default w1, h1 = freqz([1.0], 1) w2, h2 = freqz([1.0], 1, None) assert_array_almost_equal(w1, w2) assert_array_almost_equal(h1, h2) def test_fs_param(self, xp): fs = 900 b = xp.asarray([0.039479155677484369, 0.11843746703245311, 0.11843746703245311, 0.039479155677484369]) a = xp.asarray([1.0, -1.3199152021838287, 0.80341991081938424, -0.16767146321568049]) # N = None, whole=False w1, h1 = freqz(b, a, fs=fs) w2, h2 = freqz(b, a) xp_assert_close(h1, h2) xp_assert_close(w1, xp.linspace(0, fs/2, 512, endpoint=False)) # N = None, whole=True w1, h1 = freqz(b, a, whole=True, fs=fs) w2, h2 = freqz(b, a, whole=True) xp_assert_close(h1, h2) xp_assert_close(w1, xp.linspace(0, fs, 512, endpoint=False)) # N = 5, whole=False w1, h1 = freqz(b, a, 5, fs=fs) w2, h2 = freqz(b, a, 5) xp_assert_close(h1, h2) xp_assert_close(w1, xp.linspace(0, fs/2, 5, endpoint=False)) # N = 5, whole=True w1, h1 = freqz(b, a, 5, whole=True, fs=fs) w2, h2 = freqz(b, a, 5, whole=True) xp_assert_close(h1, h2) xp_assert_close(w1, xp.linspace(0, fs, 5, endpoint=False)) if is_numpy(xp): # w is an array_like for w in ([123], (123,), xp.asarray([123]), (50, 123, 230), xp.asarray([50, 123, 230])): w1, h1 = freqz(b, a, w, fs=fs) w2, h2 = freqz(b, a, 2*pi*xp.asarray(w, dtype=xp.float64)/ fs) xp_assert_close(h1, h2) xp_assert_close(w1, xp.asarray(w), check_dtype=False) def 
test_w_or_N_types(self): # Measure at 7 (polyval) or 8 (fft) equally-spaced points for N in (7, np.int8(7), np.int16(7), np.int32(7), np.int64(7), np.array(7), 8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), np.array(8)): w, h = freqz([1.0], worN=N) assert_array_almost_equal(w, np.pi * np.arange(N) / N) assert_array_almost_equal(h, np.ones(N)) w, h = freqz([1.0], worN=N, fs=100) assert_array_almost_equal(w, np.linspace(0, 50, N, endpoint=False)) assert_array_almost_equal(h, np.ones(N)) # Measure at frequency 8 Hz for w in (8.0, 8.0+0j): # Only makes sense when fs is specified w_out, h = freqz(np.asarray([1.0]), worN=w, fs=100) assert_array_almost_equal(w_out, np.asarray([8])) assert_array_almost_equal(h, np.asarray(1.), check_0d=False) def test_nyquist(self, xp): w, h = freqz(xp.asarray([1.0]), worN=8, include_nyquist=True) assert_array_almost_equal(w, xp.pi * xp.arange(8, dtype=w.dtype) / 7.) assert_array_almost_equal(h, xp.ones(8)) w, h = freqz(xp.asarray([1.0]), worN=9, include_nyquist=True) assert_array_almost_equal(w, xp.pi * xp.arange(9, dtype=w.dtype) / 8.) 
assert_array_almost_equal(h, xp.ones(9)) for a in [1, xp.ones(2)]: w, h = freqz(xp.ones(2), a, worN=0, include_nyquist=True) assert w.shape == (0,) assert h.shape == (0,) hdt = xp.complex128 if xp_default_dtype(xp) == xp.float64 else xp.complex64 assert h.dtype == hdt w1, h1 = freqz(xp.asarray([1.0]), worN=8, whole = True, include_nyquist=True) w2, h2 = freqz(xp.asarray([1.0]), worN=8, whole = True, include_nyquist=False) assert_array_almost_equal(w1, w2) assert_array_almost_equal(h1, h2) # https://github.com/scipy/scipy/issues/17289 # https://github.com/scipy/scipy/issues/15273 @pytest.mark.parametrize('whole,nyquist,worN', [(False, False, 32), (False, True, 32), (True, False, 32), (True, True, 32), (False, False, 257), (False, True, 257), (True, False, 257), (True, True, 257)]) @xfail_xp_backends("cupy", reason="XXX: CuPy's version suspect") def test_17289(self, whole, nyquist, worN, xp): d = xp.asarray([0.0, 1.0]) w, Drfft = freqz(d, worN=32, whole=whole, include_nyquist=nyquist) _, Dpoly = freqz(d, worN=w) xp_assert_close(Drfft, Dpoly) def test_fs_validation(self): with pytest.raises(ValueError, match="Sampling.*single scalar"): freqz([1.0], fs=np.array([10, 20])) with pytest.raises(ValueError, match="Sampling.*be none."): freqz([1.0], fs=None) @make_xp_test_case(freqz_sos)
TestFreqz
python
getsentry__sentry
tests/sentry/api/endpoints/test_relay_projectids.py
{ "start": 574, "end": 4571 }
class ____(APITestCase): _date_regex = re.compile(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z$") def setUp(self) -> None: self.path = reverse("sentry-api-0-relay-projectids") sk, pk = generate_key_pair() self.public_key = pk self.private_key = sk self.relay_id = str(uuid.uuid4()) self.project = self.create_project() self.project.update_option("sentry:scrub_ip_address", True) self.project_key = self.create_project_key() def _setup_relay(self, add_org_key): self.relay = Relay.objects.create( relay_id=self.relay_id, public_key=self.public_key, ) if add_org_key: org = self.project.organization org.update_option( "sentry:trusted-relays", [{"public_key": str(self.public_key), "name": "main-relay"}], ) def _call_endpoint(self, public_key, internal): raw_json, signature = self.private_key.pack({"publicKeys": [public_key]}) if internal: internal_relays = [str(self.public_key)] else: internal_relays = [] with disable_internal_networks(): with override_settings(SENTRY_RELAY_WHITELIST_PK=internal_relays): resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) return orjson.loads(resp.content), resp.status_code def _call_endpoint_static_relay(self, internal): raw_json, signature = self.private_key.pack({"publicKeys": [str(self.public_key)]}) static_auth = {self.relay_id: {"internal": internal, "public_key": str(self.public_key)}} with override_options({"relay.static_auth": static_auth}): resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) return orjson.loads(resp.content), resp.status_code def test_internal_relay(self) -> None: self._setup_relay(add_org_key=True) public_key = self.project_key.public_key result, status_code = self._call_endpoint(public_key, internal=True) assert status_code < 400 assert safe.get_path(result, "projectIds", public_key) == 
self.project.id def test_external_relay(self) -> None: self._setup_relay(add_org_key=True) public_key = self.project_key.public_key result, status_code = self._call_endpoint(public_key, internal=False) assert status_code < 400 assert safe.get_path(result, "projectIds", public_key) == self.project.id def test_unknown_key(self) -> None: self._setup_relay(add_org_key=True) public_key = "feedfacefeedfacefeedfacefeedface" result, status_code = self._call_endpoint(public_key, internal=True) assert status_code < 400 with override_settings(SENTRY_RELAY_WHITELIST_PK=[str(self.public_key)]): assert safe.get_path(result, "projectIds", public_key) is None def test_unauthorized_relay(self) -> None: self._setup_relay(add_org_key=False) public_key = self.project_key.public_key result, status_code = self._call_endpoint(public_key, internal=False) assert status_code < 400 # NB: Unauthorized Relays also receive the project id, but cannot fetch # the project ID afterwards. assert safe.get_path(result, "projectIds", public_key) == self.project.id def test_statically_configured_relay(self) -> None: result, status_code = self._call_endpoint_static_relay(internal=True) assert status_code < 400
RelayProjectIdsEndpointTest
python
celery__celery
celery/bootsteps.py
{ "start": 1790, "end": 8198 }
class ____: """Blueprint containing bootsteps that can be applied to objects. Arguments: steps Sequence[Union[str, Step]]: List of steps. name (str): Set explicit name for this blueprint. on_start (Callable): Optional callback applied after blueprint start. on_close (Callable): Optional callback applied before blueprint close. on_stopped (Callable): Optional callback applied after blueprint stopped. """ GraphFormatter = StepFormatter name = None state = None started = 0 default_steps = set() state_to_name = { 0: 'initializing', RUN: 'running', CLOSE: 'closing', TERMINATE: 'terminating', } def __init__(self, steps=None, name=None, on_start=None, on_close=None, on_stopped=None): self.name = name or self.name or qualname(type(self)) self.types = set(steps or []) | set(self.default_steps) self.on_start = on_start self.on_close = on_close self.on_stopped = on_stopped self.shutdown_complete = Event() self.steps = {} def start(self, parent): self.state = RUN if self.on_start: self.on_start() for i, step in enumerate(s for s in parent.steps if s is not None): self._debug('Starting %s', step.alias) self.started = i + 1 step.start(parent) logger.debug('^-- substep ok') def human_state(self): return self.state_to_name[self.state or 0] def info(self, parent): info = {} for step in parent.steps: info.update(step.info(parent) or {}) return info def close(self, parent): if self.on_close: self.on_close() self.send_all(parent, 'close', 'closing', reverse=False) def restart(self, parent, method='stop', description='restarting', propagate=False): self.send_all(parent, method, description, propagate=propagate) def send_all(self, parent, method, description=None, reverse=True, propagate=True, args=()): description = description or method.replace('_', ' ') steps = reversed(parent.steps) if reverse else parent.steps for step in steps: if step: fun = getattr(step, method, None) if fun is not None: self._debug('%s %s...', description.capitalize(), step.alias) try: fun(parent, *args) except 
Exception as exc: # pylint: disable=broad-except if propagate: raise logger.exception( 'Error on %s %s: %r', description, step.alias, exc) def stop(self, parent, close=True, terminate=False): what = 'terminating' if terminate else 'stopping' if self.state in (CLOSE, TERMINATE): return if self.state != RUN or self.started != len(parent.steps): # Not fully started, can safely exit. self.state = TERMINATE self.shutdown_complete.set() return self.close(parent) self.state = CLOSE self.restart( parent, 'terminate' if terminate else 'stop', description=what, propagate=False, ) if self.on_stopped: self.on_stopped() self.state = TERMINATE self.shutdown_complete.set() def join(self, timeout=None): try: # Will only get here if running green, # makes sure all greenthreads have exited. self.shutdown_complete.wait(timeout=timeout) except IGNORE_ERRORS: pass def apply(self, parent, **kwargs): """Apply the steps in this blueprint to an object. This will apply the ``__init__`` and ``include`` methods of each step, with the object as argument:: step = Step(obj) ... step.include(obj) For :class:`StartStopStep` the services created will also be added to the objects ``steps`` attribute. 
""" self._debug('Preparing bootsteps.') order = self.order = [] steps = self.steps = self.claim_steps() self._debug('Building graph...') for S in self._finalize_steps(steps): step = S(parent, **kwargs) steps[step.name] = step order.append(step) self._debug('New boot order: {%s}', ', '.join(s.alias for s in self.order)) for step in order: step.include(parent) return self def connect_with(self, other): self.graph.adjacent.update(other.graph.adjacent) self.graph.add_edge(type(other.order[0]), type(self.order[-1])) def __getitem__(self, name): return self.steps[name] def _find_last(self): return next((C for C in self.steps.values() if C.last), None) def _firstpass(self, steps): for step in steps.values(): step.requires = [symbol_by_name(dep) for dep in step.requires] stream = deque(step.requires for step in steps.values()) while stream: for node in stream.popleft(): node = symbol_by_name(node) if node.name not in self.steps: steps[node.name] = node stream.append(node.requires) def _finalize_steps(self, steps): last = self._find_last() self._firstpass(steps) it = ((C, C.requires) for C in steps.values()) G = self.graph = DependencyGraph( it, formatter=self.GraphFormatter(root=last), ) if last: for obj in G: if obj != last: G.add_edge(last, obj) try: return G.topsort() except KeyError as exc: raise KeyError('unknown bootstep: %s' % exc) def claim_steps(self): return dict(self.load_step(step) for step in self.types) def load_step(self, step): step = symbol_by_name(step) return step.name, step def _debug(self, msg, *args): return logger.debug(_pre(self, msg), *args) @property def alias(self): return _label(self)
Blueprint
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/exceptions.py
{ "start": 1747, "end": 1864 }
class ____(AirflowException): """When parse_s3_url fails to parse URL, this error is thrown."""
S3HookUriParseFailure
python
gevent__gevent
src/gevent/_util.py
{ "start": 5691, "end": 6410 }
class ____(object): """ A non-data descriptor similar to :class:`property`. The difference is that the property can be assigned to directly, without invoking a setter function. When the property is assigned to, it is cached in the instance and the function is not called on that instance again. Contrast with `Lazy`, which caches the result of the function in the instance the first time it is called and never calls the function on that instance again. """ def __init__(self, func): self.func = func update_wrapper(self, func) def __get__(self, inst, class_): if inst is None: return self return self.func(inst)
readproperty
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/notifications/test_chime.py
{ "start": 1074, "end": 2966 }
class ____: # Chime webhooks can't really have a default connection, so we need to create one for tests. @pytest.fixture(autouse=True) def setup_connections(self, create_connection_without_db): create_connection_without_db( Connection( conn_id="default-chime-webhook", conn_type="chime", host="hooks.chime.aws/incomingwebhooks/", password="abcd-1134-ZeDA?token=somechimetoken111", schema="https", ) ) @mock.patch.object(ChimeWebhookHook, "send_message") def test_chime_notifier(self, mock_chime_hook, create_dag_without_db): notifier = send_chime_notification( chime_conn_id="default-chime-webhook", message="Chime Test Message" ) notifier({"dag": create_dag_without_db("test_chime_notifier")}) mock_chime_hook.assert_called_once_with(message="Chime Test Message") @mock.patch.object(ChimeWebhookHook, "send_message") def test_chime_notifier_with_notifier_class(self, mock_chime_hook, create_dag_without_db): notifier = ChimeNotifier( chime_conn_id="default-chime-webhook", message="Test Chime Message for Class" ) notifier({"dag": create_dag_without_db("test_chime_notifier")}) mock_chime_hook.assert_called_once_with(message="Test Chime Message for Class") @mock.patch.object(ChimeWebhookHook, "send_message") def test_chime_notifier_templated(self, mock_chime_hook, create_dag_without_db): notifier = send_chime_notification( chime_conn_id="default-chime-webhook", message="Test Chime Message. Dag is {{ dag.dag_id }}." ) notifier({"dag": create_dag_without_db("test_chime_notifier")}) mock_chime_hook.assert_called_once_with(message="Test Chime Message. Dag is test_chime_notifier.")
TestChimeNotifier
python
pydantic__pydantic
.github/actions/people/people.py
{ "start": 3195, "end": 3365 }
class ____(BaseModel): """Represents a comment node with creation time and author information.""" createdAt: datetime author: Author | None = None
CommentsNode
python
getsentry__sentry
tests/sentry/workflow_engine/endpoints/test_organization_test_fire_action.py
{ "start": 1181, "end": 9857 }
class ____(APITestCase, BaseWorkflowTest): endpoint = "sentry-api-0-organization-test-fire-actions" method = "POST" def setUp(self) -> None: super().setUp() self.login_as(self.user) self.project = self.create_project(organization=self.organization) self.detector = self.create_detector(project=self.project) self.issue_stream_detector = self.create_detector( project=self.project, type=IssueStreamGroupType.slug ) self.workflow = self.create_workflow() def setup_pd_service(self) -> PagerDutyServiceDict: service_info = { "type": "service", "integration_key": "PND123", "service_name": "Sentry Service", } _integration, org_integration = self.create_provider_integration_for( provider="pagerduty", name="Example PagerDuty", external_id="example-pd", metadata={"services": [service_info]}, organization=self.organization, user=self.user, ) with assume_test_silo_mode(SiloMode.CONTROL): return add_service( org_integration, service_name=service_info["service_name"], integration_key=service_info["integration_key"], ) @mock.patch.object(PagerDutyClient, "send_trigger") @mock.patch( "sentry.notifications.notification_action.registry.group_type_notification_registry.get", return_value=IssueAlertRegistryHandler, ) @mock.patch( "sentry.notifications.notification_action.registry.issue_alert_handler_registry.get", return_value=PagerDutyIssueAlertHandler, ) def test_pagerduty_action( self, mock_get_issue_alert_handler: mock.MagicMock, mock_get_group_type_handler: mock.MagicMock, mock_send_trigger: mock.MagicMock, ) -> None: """Test a PagerDuty action""" service_info = self.setup_pd_service() action_data = [ { "type": Action.Type.PAGERDUTY.value, "integration_id": service_info["integration_id"], "data": { "priority": "info", }, "config": { "target_identifier": str(service_info["id"]), "target_type": "specific", }, } ] response = self.get_success_response(self.organization.slug, actions=action_data) assert response.status_code == 200 assert mock_send_trigger.call_count == 1 pagerduty_data = 
mock_send_trigger.call_args.kwargs.get("data") assert pagerduty_data is not None assert pagerduty_data["payload"]["summary"].startswith(f"[{self.detector.name}]:") @mock.patch.object(NotifyEventAction, "after") @mock.patch( "sentry.notifications.notification_action.registry.group_type_notification_registry.get", return_value=IssueAlertRegistryHandler, ) @mock.patch( "sentry.notifications.notification_action.registry.issue_alert_handler_registry.get", return_value=PluginIssueAlertHandler, ) def test_plugin_notify_event_action( self, mock_get_issue_alert_handler: mock.MagicMock, mock_get_group_type_handler: mock.MagicMock, mock_after: mock.MagicMock, ) -> None: """Test a Plugin action (NotifyEventAction)""" action_data = [ { "type": Action.Type.PLUGIN.value, "data": {}, "config": {}, } ] response = self.get_success_response(self.organization.slug, actions=action_data) assert response.status_code == 200 assert mock_after.called @mock.patch.object(JiraIntegration, "create_issue") @mock.patch.object(sentry_sdk, "capture_exception") def test_action_with_integration_form_error( self, mock_sdk_capture: mock.MagicMock, mock_create_issue: mock.MagicMock ) -> None: """Test that integration form errors are returned correctly""" with assume_test_silo_mode(SiloMode.CONTROL): self.jira_integration = self.create_provider_integration( provider="jira", name="Jira", external_id="jira:1" ) self.jira_integration.add_organization(self.organization, self.user) form_errors = {"broken": "something went wrong"} mock_create_issue.side_effect = IntegrationFormError(form_errors) mock_sdk_capture.return_value = "abc-1234" action_data = [ { "type": Action.Type.JIRA.value, "integration_id": self.jira_integration.id, "data": { "dynamic_form_fields": [ { "fake_field": "fake_value", } ], }, "config": {"target_type": "specific"}, } ] response = self.get_error_response(self.organization.slug, actions=action_data) assert response.status_code == 400 assert response.data == {"actions": 
[str(form_errors)]} assert mock_create_issue.call_count == 1 @mock.patch.object(JiraIntegration, "create_issue") @mock.patch.object(sentry_sdk, "capture_exception") def test_action_with_unexpected_error( self, mock_sdk_capture: mock.MagicMock, mock_create_issue: mock.MagicMock ) -> None: """Test that unexpected errors are handled correctly""" with assume_test_silo_mode(SiloMode.CONTROL): self.jira_integration = self.create_provider_integration( provider="jira", name="Jira", external_id="jira:1" ) self.jira_integration.add_organization(self.organization, self.user) mock_create_issue.side_effect = Exception("Something went wrong") mock_sdk_capture.return_value = "abc-1234" action_data = [ { "type": Action.Type.JIRA.value, "integration_id": self.jira_integration.id, "data": { "dynamic_form_fields": [ { "fake_field": "fake_value", } ], }, "config": { "target_type": "specific", }, } ] response = self.get_error_response(self.organization.slug, actions=action_data) assert response.status_code == 400 assert mock_create_issue.call_count == 1 assert response.data == {"actions": ["An unexpected error occurred. 
Error ID: 'abc-1234'"]} def test_no_projects_available(self) -> None: """Test behavior when no projects are available for the organization""" Project.objects.filter(organization=self.organization).delete() action_data = [ { "type": Action.Type.PLUGIN.value, "data": {}, "config": {}, } ] response = self.get_error_response(self.organization.slug, actions=action_data) assert response.status_code == 400 assert response.data == { "detail": "No projects found for this organization that the user has access to" } @mock.patch( "sentry.notifications.notification_action.types.BaseIssueAlertHandler.send_test_notification" ) @mock.patch("sentry.integrations.slack.actions.form.get_channel_id") def test_updates_action_with_validated_data( self, mock_get_channel_id: mock.MagicMock, mock_send_test_notification: mock.MagicMock ) -> None: self.integration, self.org_integration = self.create_provider_integration_for( provider="slack", organization=self.organization, user=self.user, name="slack", metadata={"domain_name": "https://slack.com"}, ) action_data = [ { "type": Action.Type.SLACK, "config": {"targetDisplay": "cathy-sentry", "targetType": "specific"}, "data": {"tags": "asdf"}, "integrationId": self.integration.id, } ] mock_get_channel_id.return_value = SlackChannelIdData( prefix="#", channel_id="C1234567890", timed_out=False ) self.get_success_response(self.organization.slug, actions=action_data) assert mock_send_test_notification.call_count == 1
TestFireActionsEndpointTest
python
jmcnamara__XlsxWriter
xlsxwriter/test/workbook/test_initialisation.py
{ "start": 299, "end": 894 }
class ____(unittest.TestCase): """ Test initialisation of the Workbook class and call a method. """ def setUp(self): self.fh = StringIO() self.workbook = Workbook() self.workbook._set_filehandle(self.fh) def test_xml_declaration(self): """Test Workbook xml_declaration()""" self.workbook._xml_declaration() exp = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n""" got = self.fh.getvalue() self.assertEqual(exp, got) def tearDown(self): self.workbook.fileclosed = 1
TestInitialisation
python
numba__numba
numba/cuda/stubs.py
{ "start": 5611, "end": 6030 }
class ____(Stub): ''' activemask() Returns a 32-bit integer mask of all currently active threads in the calling warp. The Nth bit is set if the Nth lane in the warp is active when activemask() is called. Inactive threads are represented by 0 bits in the returned mask. Threads which have exited the kernel are always marked as inactive. ''' _description_ = '<activemask()>'
activemask
python
getsentry__sentry
tests/sentry/plugins/test_integration_repository.py
{ "start": 585, "end": 5438 }
class ____(TestCase): @responses.activate def setUp(self) -> None: super().setUp() self.integration = self.create_integration( organization=self.organization, provider="github", external_id="654321" ) self.repo_name = "getsentry/sentry" self.config = { "identifier": self.repo_name, "external_id": "654321", "integration_id": self.integration.id, "url": "https://github.com/getsentry/sentry", } responses.add( responses.GET, "https://api.github.com/repos/" + self.repo_name, json={ "id": 1296269, "node_id": "MDEwOlJlcG9zaXRvcnkxMjk2MjY5", "name": "example-repo", "full_name": self.repo_name, }, ) @cached_property def provider(self): return GitHubRepositoryProvider("integrations:github") def _create_repo( self, external_id=None, name=None, status=ObjectStatus.ACTIVE, integration_id=None ): if not name: name = self.repo_name return Repository.objects.create( name=name, provider="integrations:github", organization_id=self.organization.id, integration_id=integration_id if integration_id else self.integration.id, url="https://github.com/" + name, config={"name": name}, external_id=external_id if external_id else "123456", status=status, ) def test_create_repository(self, get_jwt: MagicMock) -> None: self.provider.create_repository(self.config, self.organization) repos = Repository.objects.all() assert len(repos) == 1 assert repos[0].name == self.repo_name assert repos[0].provider == "integrations:github" def test_create_repository__repo_exists(self, get_jwt: MagicMock) -> None: self._create_repo(external_id=self.config["external_id"]) with pytest.raises(RepoExistsError): self.provider.create_repository(self.config, self.organization) def test_create_repository__transfer_repo_in_org(self, get_jwt: MagicMock) -> None: # can transfer a disabled repo from one integration to another in a single org integration = self.create_integration( organization=self.organization, provider="github", external_id="123456" ) self._create_repo( external_id=self.config["external_id"], 
name="getsentry/santry", status=ObjectStatus.DISABLED, integration_id=integration.id, ) _, repo = self.provider.create_repository(self.config, self.organization) assert repo.name == self.config["identifier"] assert repo.url == self.config["url"] def test_create_repository__repo_exists_update_name(self, get_jwt: MagicMock) -> None: repo = self._create_repo(external_id=self.config["external_id"], name="getsentry/santry") with pytest.raises(RepoExistsError): self.provider.create_repository(self.config, self.organization) repo.refresh_from_db() assert repo.name == self.repo_name @patch("sentry.models.Repository.objects.create") @patch("sentry.plugins.providers.IntegrationRepositoryProvider.on_delete_repository") def test_create_repository__delete_webhook( self, mock_on_delete: MagicMock, mock_repo: MagicMock, get_jwt: MagicMock ) -> None: self._create_repo() mock_repo.side_effect = IntegrityError mock_on_delete.side_effect = IntegrationError with pytest.raises(RepoExistsError): self.provider.create_repository(self.config, self.organization) @patch("sentry.plugins.providers.integration_repository.metrics") def test_create_repository__activates_existing_hidden_repo( self, mock_metrics: MagicMock, get_jwt: MagicMock ) -> None: repo = self._create_repo(external_id=self.config["external_id"]) repo.status = ObjectStatus.HIDDEN repo.save() self.provider.create_repository(self.config, self.organization) repo.refresh_from_db() assert repo.status == ObjectStatus.ACTIVE mock_metrics.incr.assert_called_with("sentry.integration_repo_provider.repo_relink") def test_create_repository__only_activates_hidden_repo(self, get_jwt: MagicMock) -> None: repo = self._create_repo(external_id=self.config["external_id"]) repo.status = ObjectStatus.PENDING_DELETION repo.save() with pytest.raises(RepoExistsError): self.provider.create_repository(self.config, self.organization) repo.refresh_from_db() assert repo.status == ObjectStatus.PENDING_DELETION
IntegrationRepositoryTestCase
python
jpadilla__pyjwt
jwt/algorithms.py
{ "start": 9867, "end": 33569 }
class ____(Algorithm): """ Performs signing and verification operations using HMAC and the specified hash function. """ SHA256: ClassVar[HashlibHash] = hashlib.sha256 SHA384: ClassVar[HashlibHash] = hashlib.sha384 SHA512: ClassVar[HashlibHash] = hashlib.sha512 def __init__(self, hash_alg: HashlibHash) -> None: self.hash_alg = hash_alg def prepare_key(self, key: str | bytes) -> bytes: key_bytes = force_bytes(key) if is_pem_format(key_bytes) or is_ssh_key(key_bytes): raise InvalidKeyError( "The specified key is an asymmetric key or x509 certificate and" " should not be used as an HMAC secret." ) return key_bytes @overload @staticmethod def to_jwk( key_obj: str | bytes, as_dict: Literal[True] ) -> JWKDict: ... # pragma: no cover @overload @staticmethod def to_jwk( key_obj: str | bytes, as_dict: Literal[False] = False ) -> str: ... # pragma: no cover @staticmethod def to_jwk(key_obj: str | bytes, as_dict: bool = False) -> JWKDict | str: jwk = { "k": base64url_encode(force_bytes(key_obj)).decode(), "kty": "oct", } if as_dict: return jwk else: return json.dumps(jwk) @staticmethod def from_jwk(jwk: str | JWKDict) -> bytes: try: if isinstance(jwk, str): obj: JWKDict = json.loads(jwk) elif isinstance(jwk, dict): obj = jwk else: raise ValueError except ValueError: raise InvalidKeyError("Key is not valid JSON") from None if obj.get("kty") != "oct": raise InvalidKeyError("Not an HMAC key") return base64url_decode(obj["k"]) def sign(self, msg: bytes, key: bytes) -> bytes: return hmac.new(key, msg, self.hash_alg).digest() def verify(self, msg: bytes, key: bytes, sig: bytes) -> bool: return hmac.compare_digest(sig, self.sign(msg, key)) if has_crypto: class RSAAlgorithm(Algorithm): """ Performs signing and verification operations using RSASSA-PKCS-v1_5 and the specified hash function. 
""" SHA256: ClassVar[type[hashes.HashAlgorithm]] = hashes.SHA256 SHA384: ClassVar[type[hashes.HashAlgorithm]] = hashes.SHA384 SHA512: ClassVar[type[hashes.HashAlgorithm]] = hashes.SHA512 _crypto_key_types = ALLOWED_RSA_KEY_TYPES def __init__(self, hash_alg: type[hashes.HashAlgorithm]) -> None: self.hash_alg = hash_alg def prepare_key(self, key: AllowedRSAKeys | str | bytes) -> AllowedRSAKeys: if isinstance(key, self._crypto_key_types): return key if not isinstance(key, (bytes, str)): raise TypeError("Expecting a PEM-formatted key.") key_bytes = force_bytes(key) try: if key_bytes.startswith(b"ssh-rsa"): public_key: PublicKeyTypes = load_ssh_public_key(key_bytes) self.check_crypto_key_type(public_key) return cast(RSAPublicKey, public_key) else: private_key: PrivateKeyTypes = load_pem_private_key( key_bytes, password=None ) self.check_crypto_key_type(private_key) return cast(RSAPrivateKey, private_key) except ValueError: try: public_key = load_pem_public_key(key_bytes) self.check_crypto_key_type(public_key) return cast(RSAPublicKey, public_key) except (ValueError, UnsupportedAlgorithm): raise InvalidKeyError( "Could not parse the provided public key." ) from None @overload @staticmethod def to_jwk( key_obj: AllowedRSAKeys, as_dict: Literal[True] ) -> JWKDict: ... # pragma: no cover @overload @staticmethod def to_jwk( key_obj: AllowedRSAKeys, as_dict: Literal[False] = False ) -> str: ... 
# pragma: no cover @staticmethod def to_jwk(key_obj: AllowedRSAKeys, as_dict: bool = False) -> JWKDict | str: obj: dict[str, Any] | None = None if hasattr(key_obj, "private_numbers"): # Private key numbers = key_obj.private_numbers() obj = { "kty": "RSA", "key_ops": ["sign"], "n": to_base64url_uint(numbers.public_numbers.n).decode(), "e": to_base64url_uint(numbers.public_numbers.e).decode(), "d": to_base64url_uint(numbers.d).decode(), "p": to_base64url_uint(numbers.p).decode(), "q": to_base64url_uint(numbers.q).decode(), "dp": to_base64url_uint(numbers.dmp1).decode(), "dq": to_base64url_uint(numbers.dmq1).decode(), "qi": to_base64url_uint(numbers.iqmp).decode(), } elif hasattr(key_obj, "verify"): # Public key numbers = key_obj.public_numbers() obj = { "kty": "RSA", "key_ops": ["verify"], "n": to_base64url_uint(numbers.n).decode(), "e": to_base64url_uint(numbers.e).decode(), } else: raise InvalidKeyError("Not a public or private key") if as_dict: return obj else: return json.dumps(obj) @staticmethod def from_jwk(jwk: str | JWKDict) -> AllowedRSAKeys: try: if isinstance(jwk, str): obj = json.loads(jwk) elif isinstance(jwk, dict): obj = jwk else: raise ValueError except ValueError: raise InvalidKeyError("Key is not valid JSON") from None if obj.get("kty") != "RSA": raise InvalidKeyError("Not an RSA key") from None if "d" in obj and "e" in obj and "n" in obj: # Private key if "oth" in obj: raise InvalidKeyError( "Unsupported RSA private key: > 2 primes not supported" ) other_props = ["p", "q", "dp", "dq", "qi"] props_found = [prop in obj for prop in other_props] any_props_found = any(props_found) if any_props_found and not all(props_found): raise InvalidKeyError( "RSA key must include all parameters if any are present besides d" ) from None public_numbers = RSAPublicNumbers( from_base64url_uint(obj["e"]), from_base64url_uint(obj["n"]), ) if any_props_found: numbers = RSAPrivateNumbers( d=from_base64url_uint(obj["d"]), p=from_base64url_uint(obj["p"]), 
q=from_base64url_uint(obj["q"]), dmp1=from_base64url_uint(obj["dp"]), dmq1=from_base64url_uint(obj["dq"]), iqmp=from_base64url_uint(obj["qi"]), public_numbers=public_numbers, ) else: d = from_base64url_uint(obj["d"]) p, q = rsa_recover_prime_factors( public_numbers.n, d, public_numbers.e ) numbers = RSAPrivateNumbers( d=d, p=p, q=q, dmp1=rsa_crt_dmp1(d, p), dmq1=rsa_crt_dmq1(d, q), iqmp=rsa_crt_iqmp(p, q), public_numbers=public_numbers, ) return numbers.private_key() elif "n" in obj and "e" in obj: # Public key return RSAPublicNumbers( from_base64url_uint(obj["e"]), from_base64url_uint(obj["n"]), ).public_key() else: raise InvalidKeyError("Not a public or private key") def sign(self, msg: bytes, key: RSAPrivateKey) -> bytes: return key.sign(msg, padding.PKCS1v15(), self.hash_alg()) def verify(self, msg: bytes, key: RSAPublicKey, sig: bytes) -> bool: try: key.verify(sig, msg, padding.PKCS1v15(), self.hash_alg()) return True except InvalidSignature: return False class ECAlgorithm(Algorithm): """ Performs signing and verification operations using ECDSA and the specified hash function """ SHA256: ClassVar[type[hashes.HashAlgorithm]] = hashes.SHA256 SHA384: ClassVar[type[hashes.HashAlgorithm]] = hashes.SHA384 SHA512: ClassVar[type[hashes.HashAlgorithm]] = hashes.SHA512 _crypto_key_types = ALLOWED_EC_KEY_TYPES def __init__(self, hash_alg: type[hashes.HashAlgorithm]) -> None: self.hash_alg = hash_alg def prepare_key(self, key: AllowedECKeys | str | bytes) -> AllowedECKeys: if isinstance(key, self._crypto_key_types): return key if not isinstance(key, (bytes, str)): raise TypeError("Expecting a PEM-formatted key.") key_bytes = force_bytes(key) # Attempt to load key. We don't know if it's # a Signing Key or a Verifying Key, so we try # the Verifying Key first. 
try: if key_bytes.startswith(b"ecdsa-sha2-"): public_key: PublicKeyTypes = load_ssh_public_key(key_bytes) else: public_key = load_pem_public_key(key_bytes) # Explicit check the key to prevent confusing errors from cryptography self.check_crypto_key_type(public_key) return cast(EllipticCurvePublicKey, public_key) except ValueError: private_key = load_pem_private_key(key_bytes, password=None) self.check_crypto_key_type(private_key) return cast(EllipticCurvePrivateKey, private_key) def sign(self, msg: bytes, key: EllipticCurvePrivateKey) -> bytes: der_sig = key.sign(msg, ECDSA(self.hash_alg())) return der_to_raw_signature(der_sig, key.curve) def verify(self, msg: bytes, key: AllowedECKeys, sig: bytes) -> bool: try: der_sig = raw_to_der_signature(sig, key.curve) except ValueError: return False try: public_key = ( key.public_key() if isinstance(key, EllipticCurvePrivateKey) else key ) public_key.verify(der_sig, msg, ECDSA(self.hash_alg())) return True except InvalidSignature: return False @overload @staticmethod def to_jwk( key_obj: AllowedECKeys, as_dict: Literal[True] ) -> JWKDict: ... # pragma: no cover @overload @staticmethod def to_jwk( key_obj: AllowedECKeys, as_dict: Literal[False] = False ) -> str: ... 
# pragma: no cover @staticmethod def to_jwk(key_obj: AllowedECKeys, as_dict: bool = False) -> JWKDict | str: if isinstance(key_obj, EllipticCurvePrivateKey): public_numbers = key_obj.public_key().public_numbers() elif isinstance(key_obj, EllipticCurvePublicKey): public_numbers = key_obj.public_numbers() else: raise InvalidKeyError("Not a public or private key") if isinstance(key_obj.curve, SECP256R1): crv = "P-256" elif isinstance(key_obj.curve, SECP384R1): crv = "P-384" elif isinstance(key_obj.curve, SECP521R1): crv = "P-521" elif isinstance(key_obj.curve, SECP256K1): crv = "secp256k1" else: raise InvalidKeyError(f"Invalid curve: {key_obj.curve}") obj: dict[str, Any] = { "kty": "EC", "crv": crv, "x": to_base64url_uint( public_numbers.x, bit_length=key_obj.curve.key_size, ).decode(), "y": to_base64url_uint( public_numbers.y, bit_length=key_obj.curve.key_size, ).decode(), } if isinstance(key_obj, EllipticCurvePrivateKey): obj["d"] = to_base64url_uint( key_obj.private_numbers().private_value, bit_length=key_obj.curve.key_size, ).decode() if as_dict: return obj else: return json.dumps(obj) @staticmethod def from_jwk(jwk: str | JWKDict) -> AllowedECKeys: try: if isinstance(jwk, str): obj = json.loads(jwk) elif isinstance(jwk, dict): obj = jwk else: raise ValueError except ValueError: raise InvalidKeyError("Key is not valid JSON") from None if obj.get("kty") != "EC": raise InvalidKeyError("Not an Elliptic curve key") from None if "x" not in obj or "y" not in obj: raise InvalidKeyError("Not an Elliptic curve key") from None x = base64url_decode(obj.get("x")) y = base64url_decode(obj.get("y")) curve = obj.get("crv") curve_obj: EllipticCurve if curve == "P-256": if len(x) == len(y) == 32: curve_obj = SECP256R1() else: raise InvalidKeyError( "Coords should be 32 bytes for curve P-256" ) from None elif curve == "P-384": if len(x) == len(y) == 48: curve_obj = SECP384R1() else: raise InvalidKeyError( "Coords should be 48 bytes for curve P-384" ) from None elif curve == 
"P-521": if len(x) == len(y) == 66: curve_obj = SECP521R1() else: raise InvalidKeyError( "Coords should be 66 bytes for curve P-521" ) from None elif curve == "secp256k1": if len(x) == len(y) == 32: curve_obj = SECP256K1() else: raise InvalidKeyError( "Coords should be 32 bytes for curve secp256k1" ) else: raise InvalidKeyError(f"Invalid curve: {curve}") public_numbers = EllipticCurvePublicNumbers( x=int.from_bytes(x, byteorder="big"), y=int.from_bytes(y, byteorder="big"), curve=curve_obj, ) if "d" not in obj: return public_numbers.public_key() d = base64url_decode(obj.get("d")) if len(d) != len(x): raise InvalidKeyError( "D should be {} bytes for curve {}", len(x), curve ) return EllipticCurvePrivateNumbers( int.from_bytes(d, byteorder="big"), public_numbers ).private_key() class RSAPSSAlgorithm(RSAAlgorithm): """ Performs a signature using RSASSA-PSS with MGF1 """ def sign(self, msg: bytes, key: RSAPrivateKey) -> bytes: return key.sign( msg, padding.PSS( mgf=padding.MGF1(self.hash_alg()), salt_length=self.hash_alg().digest_size, ), self.hash_alg(), ) def verify(self, msg: bytes, key: RSAPublicKey, sig: bytes) -> bool: try: key.verify( sig, msg, padding.PSS( mgf=padding.MGF1(self.hash_alg()), salt_length=self.hash_alg().digest_size, ), self.hash_alg(), ) return True except InvalidSignature: return False class OKPAlgorithm(Algorithm): """ Performs signing and verification operations using EdDSA This class requires ``cryptography>=2.6`` to be installed. 
""" _crypto_key_types = ALLOWED_OKP_KEY_TYPES def __init__(self, **kwargs: Any) -> None: pass def prepare_key(self, key: AllowedOKPKeys | str | bytes) -> AllowedOKPKeys: if not isinstance(key, (str, bytes)): self.check_crypto_key_type(key) return key key_str = key.decode("utf-8") if isinstance(key, bytes) else key key_bytes = key.encode("utf-8") if isinstance(key, str) else key loaded_key: PublicKeyTypes | PrivateKeyTypes if "-----BEGIN PUBLIC" in key_str: loaded_key = load_pem_public_key(key_bytes) elif "-----BEGIN PRIVATE" in key_str: loaded_key = load_pem_private_key(key_bytes, password=None) elif key_str[0:4] == "ssh-": loaded_key = load_ssh_public_key(key_bytes) else: raise InvalidKeyError("Not a public or private key") # Explicit check the key to prevent confusing errors from cryptography self.check_crypto_key_type(loaded_key) return cast("AllowedOKPKeys", loaded_key) def sign( self, msg: str | bytes, key: Ed25519PrivateKey | Ed448PrivateKey ) -> bytes: """ Sign a message ``msg`` using the EdDSA private key ``key`` :param str|bytes msg: Message to sign :param Ed25519PrivateKey}Ed448PrivateKey key: A :class:`.Ed25519PrivateKey` or :class:`.Ed448PrivateKey` isinstance :return bytes signature: The signature, as bytes """ msg_bytes = msg.encode("utf-8") if isinstance(msg, str) else msg return key.sign(msg_bytes) def verify( self, msg: str | bytes, key: AllowedOKPKeys, sig: str | bytes ) -> bool: """ Verify a given ``msg`` against a signature ``sig`` using the EdDSA key ``key`` :param str|bytes sig: EdDSA signature to check ``msg`` against :param str|bytes msg: Message to sign :param Ed25519PrivateKey|Ed25519PublicKey|Ed448PrivateKey|Ed448PublicKey key: A private or public EdDSA key instance :return bool verified: True if signature is valid, False if not. 
""" try: msg_bytes = msg.encode("utf-8") if isinstance(msg, str) else msg sig_bytes = sig.encode("utf-8") if isinstance(sig, str) else sig public_key = ( key.public_key() if isinstance(key, (Ed25519PrivateKey, Ed448PrivateKey)) else key ) public_key.verify(sig_bytes, msg_bytes) return True # If no exception was raised, the signature is valid. except InvalidSignature: return False @overload @staticmethod def to_jwk( key: AllowedOKPKeys, as_dict: Literal[True] ) -> JWKDict: ... # pragma: no cover @overload @staticmethod def to_jwk( key: AllowedOKPKeys, as_dict: Literal[False] = False ) -> str: ... # pragma: no cover @staticmethod def to_jwk(key: AllowedOKPKeys, as_dict: bool = False) -> JWKDict | str: if isinstance(key, (Ed25519PublicKey, Ed448PublicKey)): x = key.public_bytes( encoding=Encoding.Raw, format=PublicFormat.Raw, ) crv = "Ed25519" if isinstance(key, Ed25519PublicKey) else "Ed448" obj = { "x": base64url_encode(force_bytes(x)).decode(), "kty": "OKP", "crv": crv, } if as_dict: return obj else: return json.dumps(obj) if isinstance(key, (Ed25519PrivateKey, Ed448PrivateKey)): d = key.private_bytes( encoding=Encoding.Raw, format=PrivateFormat.Raw, encryption_algorithm=NoEncryption(), ) x = key.public_key().public_bytes( encoding=Encoding.Raw, format=PublicFormat.Raw, ) crv = "Ed25519" if isinstance(key, Ed25519PrivateKey) else "Ed448" obj = { "x": base64url_encode(force_bytes(x)).decode(), "d": base64url_encode(force_bytes(d)).decode(), "kty": "OKP", "crv": crv, } if as_dict: return obj else: return json.dumps(obj) raise InvalidKeyError("Not a public or private key") @staticmethod def from_jwk(jwk: str | JWKDict) -> AllowedOKPKeys: try: if isinstance(jwk, str): obj = json.loads(jwk) elif isinstance(jwk, dict): obj = jwk else: raise ValueError except ValueError: raise InvalidKeyError("Key is not valid JSON") from None if obj.get("kty") != "OKP": raise InvalidKeyError("Not an Octet Key Pair") curve = obj.get("crv") if curve != "Ed25519" and curve != "Ed448": raise 
InvalidKeyError(f"Invalid curve: {curve}") if "x" not in obj: raise InvalidKeyError('OKP should have "x" parameter') x = base64url_decode(obj.get("x")) try: if "d" not in obj: if curve == "Ed25519": return Ed25519PublicKey.from_public_bytes(x) return Ed448PublicKey.from_public_bytes(x) d = base64url_decode(obj.get("d")) if curve == "Ed25519": return Ed25519PrivateKey.from_private_bytes(d) return Ed448PrivateKey.from_private_bytes(d) except ValueError as err: raise InvalidKeyError("Invalid key parameter") from err
HMACAlgorithm
python
Lightning-AI__lightning
src/lightning/fabric/utilities/throughput.py
{ "start": 27249, "end": 28195 }
class ____(list[T]): """Custom fixed size list that only supports right-append and ensures that all values increase monotonically.""" def __init__(self, maxlen: int) -> None: super().__init__() self.maxlen = maxlen @property def last(self) -> Optional[T]: if len(self) > 0: return self[-1] return None @override def append(self, x: T) -> None: last = self.last if last is not None and last >= x: raise ValueError(f"Expected the value to increase, last: {last}, current: {x}") list.append(self, x) # truncate excess if len(self) > self.maxlen: del self[0] @override def __setitem__(self, key: Any, value: Any) -> None: # assigning is not implemented since we don't use it. it could be by checking all previous values raise NotImplementedError("__setitem__ is not supported")
_MonotonicWindow
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/gcs.py
{ "start": 41559, "end": 46010 }
class ____(GoogleCloudBaseOperator): """ Synchronizes the contents of the buckets or bucket's directories in the Google Cloud Services. Parameters ``source_object`` and ``destination_object`` describe the root sync directory. If they are not passed, the entire bucket will be synchronized. They should point to directories. .. note:: The synchronization of individual files is not supported. Only entire directories can be synchronized. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:GCSSynchronizeBucketsOperator` :param source_bucket: The name of the bucket containing the source objects. :param destination_bucket: The name of the bucket containing the destination objects. :param source_object: The root sync directory in the source bucket. :param destination_object: The root sync directory in the destination bucket. :param recursive: If True, subdirectories will be considered :param allow_overwrite: if True, the files will be overwritten if a mismatched file is found. By default, overwriting files is not allowed :param delete_extra_files: if True, deletes additional files from the source that not found in the destination. By default extra files are not deleted. .. note:: This option can delete data quickly if you specify the wrong source/destination combination. :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). 
""" template_fields: Sequence[str] = ( "source_bucket", "destination_bucket", "source_object", "destination_object", "recursive", "delete_extra_files", "allow_overwrite", "gcp_conn_id", "impersonation_chain", ) operator_extra_links = (StorageLink(),) def __init__( self, *, source_bucket: str, destination_bucket: str, source_object: str | None = None, destination_object: str | None = None, recursive: bool = True, delete_extra_files: bool = False, allow_overwrite: bool = False, gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.source_bucket = source_bucket self.destination_bucket = destination_bucket self.source_object = source_object self.destination_object = destination_object self.recursive = recursive self.delete_extra_files = delete_extra_files self.allow_overwrite = allow_overwrite self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context: Context) -> None: hook = GCSHook( gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain, ) StorageLink.persist( context=context, uri=self._get_uri(self.destination_bucket, self.destination_object), project_id=hook.project_id, ) hook.sync( source_bucket=self.source_bucket, destination_bucket=self.destination_bucket, source_object=self.source_object, destination_object=self.destination_object, recursive=self.recursive, delete_extra_files=self.delete_extra_files, allow_overwrite=self.allow_overwrite, ) def _get_uri(self, gcs_bucket: str, gcs_object: str | None) -> str: if gcs_object and gcs_object[-1] == "/": gcs_object = gcs_object[:-1] return f"{gcs_bucket}/{gcs_object}" if gcs_object else gcs_bucket
GCSSynchronizeBucketsOperator
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/output/color_depth.py
{ "start": 103, "end": 1569 }
class ____(str, Enum): """ Possible color depth values for the output. """ value: str #: One color only. DEPTH_1_BIT = "DEPTH_1_BIT" #: ANSI Colors. DEPTH_4_BIT = "DEPTH_4_BIT" #: The default. DEPTH_8_BIT = "DEPTH_8_BIT" #: 24 bit True color. DEPTH_24_BIT = "DEPTH_24_BIT" # Aliases. MONOCHROME = DEPTH_1_BIT ANSI_COLORS_ONLY = DEPTH_4_BIT DEFAULT = DEPTH_8_BIT TRUE_COLOR = DEPTH_24_BIT @classmethod def from_env(cls) -> ColorDepth | None: """ Return the color depth if the $PROMPT_TOOLKIT_COLOR_DEPTH environment variable has been set. This is a way to enforce a certain color depth in all prompt_toolkit applications. """ # Disable color if a `NO_COLOR` environment variable is set. # See: https://no-color.org/ if os.environ.get("NO_COLOR"): return cls.DEPTH_1_BIT # Check the `PROMPT_TOOLKIT_COLOR_DEPTH` environment variable. all_values = [i.value for i in ColorDepth] if os.environ.get("PROMPT_TOOLKIT_COLOR_DEPTH") in all_values: return cls(os.environ["PROMPT_TOOLKIT_COLOR_DEPTH"]) return None @classmethod def default(cls) -> ColorDepth: """ Return the default color depth for the default output. """ from .defaults import create_output return create_output().get_default_color_depth()
ColorDepth
python
django__django
tests/generic_views/test_list.py
{ "start": 272, "end": 12688 }
class ____(TestCase): @classmethod def setUpTestData(cls): cls.artist1 = Artist.objects.create(name="Rene Magritte") cls.author1 = Author.objects.create( name="Roberto Bolaño", slug="roberto-bolano" ) cls.author2 = Author.objects.create( name="Scott Rosenberg", slug="scott-rosenberg" ) cls.book1 = Book.objects.create( name="2066", slug="2066", pages=800, pubdate=datetime.date(2008, 10, 1) ) cls.book1.authors.add(cls.author1) cls.book2 = Book.objects.create( name="Dreaming in Code", slug="dreaming-in-code", pages=300, pubdate=datetime.date(2006, 5, 1), ) cls.page1 = Page.objects.create( content="I was once bitten by a moose.", template="generic_views/page_template.html", ) def test_items(self): res = self.client.get("/list/dict/") self.assertEqual(res.status_code, 200) self.assertTemplateUsed(res, "generic_views/list.html") self.assertEqual(res.context["object_list"][0]["first"], "John") def test_queryset(self): res = self.client.get("/list/authors/") self.assertEqual(res.status_code, 200) self.assertTemplateUsed(res, "generic_views/author_list.html") self.assertEqual(list(res.context["object_list"]), list(Author.objects.all())) self.assertIsInstance(res.context["view"], View) self.assertIs(res.context["author_list"], res.context["object_list"]) self.assertIsNone(res.context["paginator"]) self.assertIsNone(res.context["page_obj"]) self.assertFalse(res.context["is_paginated"]) def test_paginated_queryset(self): self._make_authors(100) res = self.client.get("/list/authors/paginated/") self.assertEqual(res.status_code, 200) self.assertTemplateUsed(res, "generic_views/author_list.html") self.assertEqual(len(res.context["object_list"]), 30) self.assertIs(res.context["author_list"], res.context["object_list"]) self.assertTrue(res.context["is_paginated"]) self.assertEqual(res.context["page_obj"].number, 1) self.assertEqual(res.context["paginator"].num_pages, 4) self.assertEqual(res.context["author_list"][0].name, "Author 00") 
self.assertEqual(list(res.context["author_list"])[-1].name, "Author 29") def test_paginated_queryset_shortdata(self): # Short datasets also result in a paginated view. res = self.client.get("/list/authors/paginated/") self.assertEqual(res.status_code, 200) self.assertTemplateUsed(res, "generic_views/author_list.html") self.assertEqual(list(res.context["object_list"]), list(Author.objects.all())) self.assertIs(res.context["author_list"], res.context["object_list"]) self.assertEqual(res.context["page_obj"].number, 1) self.assertEqual(res.context["paginator"].num_pages, 1) self.assertFalse(res.context["is_paginated"]) def test_paginated_get_page_by_query_string(self): self._make_authors(100) res = self.client.get("/list/authors/paginated/", {"page": "2"}) self.assertEqual(res.status_code, 200) self.assertTemplateUsed(res, "generic_views/author_list.html") self.assertEqual(len(res.context["object_list"]), 30) self.assertIs(res.context["author_list"], res.context["object_list"]) self.assertEqual(res.context["author_list"][0].name, "Author 30") self.assertEqual(res.context["page_obj"].number, 2) def test_paginated_get_last_page_by_query_string(self): self._make_authors(100) res = self.client.get("/list/authors/paginated/", {"page": "last"}) self.assertEqual(res.status_code, 200) self.assertEqual(len(res.context["object_list"]), 10) self.assertIs(res.context["author_list"], res.context["object_list"]) self.assertEqual(res.context["author_list"][0].name, "Author 90") self.assertEqual(res.context["page_obj"].number, 4) def test_paginated_get_page_by_urlvar(self): self._make_authors(100) res = self.client.get("/list/authors/paginated/3/") self.assertEqual(res.status_code, 200) self.assertTemplateUsed(res, "generic_views/author_list.html") self.assertEqual(len(res.context["object_list"]), 30) self.assertIs(res.context["author_list"], res.context["object_list"]) self.assertEqual(res.context["author_list"][0].name, "Author 60") self.assertEqual(res.context["page_obj"].number, 
3) def test_paginated_page_out_of_range(self): self._make_authors(100) res = self.client.get("/list/authors/paginated/42/") self.assertEqual(res.status_code, 404) def test_paginated_invalid_page(self): self._make_authors(100) res = self.client.get("/list/authors/paginated/?page=frog") self.assertEqual(res.status_code, 404) def test_paginated_custom_paginator_class(self): self._make_authors(7) res = self.client.get("/list/authors/paginated/custom_class/") self.assertEqual(res.status_code, 200) self.assertEqual(res.context["paginator"].num_pages, 1) # Custom pagination allows for 2 orphans on a page size of 5 self.assertEqual(len(res.context["object_list"]), 7) def test_paginated_custom_page_kwarg(self): self._make_authors(100) res = self.client.get( "/list/authors/paginated/custom_page_kwarg/", {"pagina": "2"} ) self.assertEqual(res.status_code, 200) self.assertTemplateUsed(res, "generic_views/author_list.html") self.assertEqual(len(res.context["object_list"]), 30) self.assertIs(res.context["author_list"], res.context["object_list"]) self.assertEqual(res.context["author_list"][0].name, "Author 30") self.assertEqual(res.context["page_obj"].number, 2) def test_paginated_custom_paginator_constructor(self): self._make_authors(7) res = self.client.get("/list/authors/paginated/custom_constructor/") self.assertEqual(res.status_code, 200) # Custom pagination allows for 2 orphans on a page size of 5 self.assertEqual(len(res.context["object_list"]), 7) def test_paginated_orphaned_queryset(self): self._make_authors(92) res = self.client.get("/list/authors/paginated-orphaned/") self.assertEqual(res.status_code, 200) self.assertEqual(res.context["page_obj"].number, 1) res = self.client.get("/list/authors/paginated-orphaned/", {"page": "last"}) self.assertEqual(res.status_code, 200) self.assertEqual(res.context["page_obj"].number, 3) res = self.client.get("/list/authors/paginated-orphaned/", {"page": "3"}) self.assertEqual(res.status_code, 200) 
self.assertEqual(res.context["page_obj"].number, 3) res = self.client.get("/list/authors/paginated-orphaned/", {"page": "4"}) self.assertEqual(res.status_code, 404) def test_paginated_non_queryset(self): res = self.client.get("/list/dict/paginated/") self.assertEqual(res.status_code, 200) self.assertEqual(len(res.context["object_list"]), 1) def test_verbose_name(self): res = self.client.get("/list/artists/") self.assertEqual(res.status_code, 200) self.assertTemplateUsed(res, "generic_views/list.html") self.assertEqual(list(res.context["object_list"]), list(Artist.objects.all())) self.assertIs(res.context["artist_list"], res.context["object_list"]) self.assertIsNone(res.context["paginator"]) self.assertIsNone(res.context["page_obj"]) self.assertFalse(res.context["is_paginated"]) def test_allow_empty_false(self): res = self.client.get("/list/authors/notempty/") self.assertEqual(res.status_code, 200) Author.objects.all().delete() res = self.client.get("/list/authors/notempty/") self.assertEqual(res.status_code, 404) def test_template_name(self): res = self.client.get("/list/authors/template_name/") self.assertEqual(res.status_code, 200) self.assertEqual(list(res.context["object_list"]), list(Author.objects.all())) self.assertIs(res.context["author_list"], res.context["object_list"]) self.assertTemplateUsed(res, "generic_views/list.html") def test_template_name_suffix(self): res = self.client.get("/list/authors/template_name_suffix/") self.assertEqual(res.status_code, 200) self.assertEqual(list(res.context["object_list"]), list(Author.objects.all())) self.assertIs(res.context["author_list"], res.context["object_list"]) self.assertTemplateUsed(res, "generic_views/author_objects.html") def test_context_object_name(self): res = self.client.get("/list/authors/context_object_name/") self.assertEqual(res.status_code, 200) self.assertEqual(list(res.context["object_list"]), list(Author.objects.all())) self.assertNotIn("authors", res.context) 
self.assertIs(res.context["author_list"], res.context["object_list"]) self.assertTemplateUsed(res, "generic_views/author_list.html") def test_duplicate_context_object_name(self): res = self.client.get("/list/authors/dupe_context_object_name/") self.assertEqual(res.status_code, 200) self.assertEqual(list(res.context["object_list"]), list(Author.objects.all())) self.assertNotIn("authors", res.context) self.assertNotIn("author_list", res.context) self.assertTemplateUsed(res, "generic_views/author_list.html") def test_missing_items(self): msg = ( "AuthorList is missing a QuerySet. Define AuthorList.model, " "AuthorList.queryset, or override AuthorList.get_queryset()." ) with self.assertRaisesMessage(ImproperlyConfigured, msg): self.client.get("/list/authors/invalid/") def test_invalid_get_queryset(self): msg = ( "AuthorListGetQuerysetReturnsNone requires either a 'template_name' " "attribute or a get_queryset() method that returns a QuerySet." ) with self.assertRaisesMessage(ImproperlyConfigured, msg): self.client.get("/list/authors/get_queryset/") def test_paginated_list_view_does_not_load_entire_table(self): # Regression test for #17535 self._make_authors(3) # 1 query for authors with self.assertNumQueries(1): self.client.get("/list/authors/notempty/") # same as above + 1 query to test if authors exist + 1 query for # pagination with self.assertNumQueries(3): self.client.get("/list/authors/notempty/paginated/") def test_explicitly_ordered_list_view(self): Book.objects.create( name="Zebras for Dummies", pages=800, pubdate=datetime.date(2006, 9, 1) ) res = self.client.get("/list/books/sorted/") self.assertEqual(res.status_code, 200) self.assertEqual(res.context["object_list"][0].name, "2066") self.assertEqual(res.context["object_list"][1].name, "Dreaming in Code") self.assertEqual(res.context["object_list"][2].name, "Zebras for Dummies") res = self.client.get("/list/books/sortedbypagesandnamedec/") self.assertEqual(res.status_code, 200) 
self.assertEqual(res.context["object_list"][0].name, "Dreaming in Code") self.assertEqual(res.context["object_list"][1].name, "Zebras for Dummies") self.assertEqual(res.context["object_list"][2].name, "2066") @override_settings(DEBUG=True) def test_paginated_list_view_returns_useful_message_on_invalid_page(self): # test for #19240 # tests that source exception's message is included in page self._make_authors(1) res = self.client.get("/list/authors/paginated/2/") self.assertEqual(res.status_code, 404) self.assertEqual( res.context.get("reason"), "Invalid page (2): That page contains no results" ) def _make_authors(self, n): Author.objects.all().delete() for i in range(n): Author.objects.create(name="Author %02i" % i, slug="a%s" % i)
ListViewTests
python
django__django
tests/backends/test_utils.py
{ "start": 378, "end": 3717 }
class ____(SimpleTestCase): def test_truncate_name(self): self.assertEqual(truncate_name("some_table", 10), "some_table") self.assertEqual(truncate_name("some_long_table", 10), "some_la38a") self.assertEqual(truncate_name("some_long_table", 10, 3), "some_loa38") self.assertEqual(truncate_name("some_long_table"), "some_long_table") # "user"."table" syntax self.assertEqual( truncate_name('username"."some_table', 10), 'username"."some_table' ) self.assertEqual( truncate_name('username"."some_long_table', 10), 'username"."some_la38a' ) self.assertEqual( truncate_name('username"."some_long_table', 10, 3), 'username"."some_loa38' ) def test_split_identifier(self): self.assertEqual(split_identifier("some_table"), ("", "some_table")) self.assertEqual(split_identifier('"some_table"'), ("", "some_table")) self.assertEqual( split_identifier('namespace"."some_table'), ("namespace", "some_table") ) self.assertEqual( split_identifier('"namespace"."some_table"'), ("namespace", "some_table") ) def test_format_number(self): def equal(value, max_d, places, result): self.assertEqual(format_number(Decimal(value), max_d, places), result) equal("0", 12, 3, "0.000") equal("0", 12, 8, "0.00000000") equal("1", 12, 9, "1.000000000") equal("0.00000000", 12, 8, "0.00000000") equal("0.000000004", 12, 8, "0.00000000") equal("0.000000008", 12, 8, "0.00000001") equal("0.000000000000000000999", 10, 8, "0.00000000") equal("0.1234567890", 12, 10, "0.1234567890") equal("0.1234567890", 12, 9, "0.123456789") equal("0.1234567890", 12, 8, "0.12345679") equal("0.1234567890", 12, 5, "0.12346") equal("0.1234567890", 12, 3, "0.123") equal("0.1234567890", 12, 1, "0.1") equal("0.1234567890", 12, 0, "0") equal("0.1234567890", None, 0, "0") equal("1234567890.1234567890", None, 0, "1234567890") equal("1234567890.1234567890", None, 2, "1234567890.12") equal("0.1234", 5, None, "0.1234") equal("123.12", 5, None, "123.12") with self.assertRaises(Rounded): equal("0.1234567890", 5, None, "0.12346") with 
self.assertRaises(Rounded): equal("1234567890.1234", 5, None, "1234600000") def test_split_tzname_delta(self): tests = [ ("Asia/Ust+Nera", ("Asia/Ust+Nera", None, None)), ("Asia/Ust-Nera", ("Asia/Ust-Nera", None, None)), ("Asia/Ust+Nera-02:00", ("Asia/Ust+Nera", "-", "02:00")), ("Asia/Ust-Nera+05:00", ("Asia/Ust-Nera", "+", "05:00")), ("America/Coral_Harbour-01:00", ("America/Coral_Harbour", "-", "01:00")), ("America/Coral_Harbour+02:30", ("America/Coral_Harbour", "+", "02:30")), ("UTC+15:00", ("UTC", "+", "15:00")), ("UTC-04:43", ("UTC", "-", "04:43")), ("UTC", ("UTC", None, None)), ("UTC+1", ("UTC+1", None, None)), ] for tzname, expected in tests: with self.subTest(tzname=tzname): self.assertEqual(split_tzname_delta(tzname), expected)
TestUtils
python
django__django
django/middleware/cache.py
{ "start": 7482, "end": 8741 }
class ____(UpdateCacheMiddleware, FetchFromCacheMiddleware): """ Cache middleware that provides basic behavior for many simple sites. Also used as the hook point for the cache decorator, which is generated using the decorator-from-middleware utility. """ def __init__(self, get_response, cache_timeout=None, page_timeout=None, **kwargs): super().__init__(get_response) # We need to differentiate between "provided, but using default value", # and "not provided". If the value is provided using a default, then # we fall back to system defaults. If it is not provided at all, # we need to use middleware defaults. try: key_prefix = kwargs["key_prefix"] if key_prefix is None: key_prefix = "" self.key_prefix = key_prefix except KeyError: pass try: cache_alias = kwargs["cache_alias"] if cache_alias is None: cache_alias = DEFAULT_CACHE_ALIAS self.cache_alias = cache_alias except KeyError: pass if cache_timeout is not None: self.cache_timeout = cache_timeout self.page_timeout = page_timeout
CacheMiddleware
python
PrefectHQ__prefect
tests/infrastructure/provisioners/test_ecs.py
{ "start": 25206, "end": 38050 }
class ____: @pytest.fixture def provisioner(self): return ElasticContainerServicePushProvisioner() @pytest.fixture def mock_console(self): return MagicMock() @pytest.fixture def mock_confirm(self): with patch("prefect.infrastructure.provisioners.ecs.Confirm") as mock: yield mock @pytest.fixture def mock_importlib(self): with patch("prefect.infrastructure.provisioners.ecs.importlib") as mock: yield mock async def test_prompt_boto3_installation( self, provisioner, mock_confirm, mock_run_process, mock_ainstall_packages ): await provisioner._prompt_boto3_installation() mock_ainstall_packages.assert_called_once_with(["boto3"]) def test_is_boto3_installed(self, provisioner, mock_importlib): assert provisioner.is_boto3_installed() mock_importlib.import_module.assert_called_once() @pytest.mark.usefixtures("register_block_types") async def test_provision_boto3_not_installed_interactive( self, provisioner, mock_confirm, mock_run_process: MagicMock, mock_importlib, mock_ainstall_packages, ): mock_confirm.ask.side_effect = [ True, False, True, ] # install boto3, do not customize, proceed with provisioning mock_importlib.import_module.side_effect = [ModuleNotFoundError, boto3] provisioner.console.is_interactive = True await provisioner.provision( work_pool_name="test-work-pool", base_job_template={ "variables": { "type": "object", "properties": { "vpc_id": {}, "cluster": {}, "aws_credentials": {}, "execution_role_arn": {}, }, } }, ) mock_confirm.ask.calls = [ call( ( "boto3 is required to configure your AWS account. Would you like to" " install it?" 
), console=ANY, ), call( "Proceed with infrastructure provisioning?", console=ANY, ), ] mock_ainstall_packages.assert_called_once_with(["boto3"]) assert mock_run_process.mock_calls == [ call( "docker login -u AWS -p 123456789012-auth-token" " https://123456789012.dkr.ecr.us-east-1.amazonaws.com" ), ] async def test_provision_boto3_not_installed_non_interactive( self, provisioner, mock_confirm, mock_importlib ): mock_importlib.import_module.side_effect = [ModuleNotFoundError, boto3] with pytest.raises(RuntimeError): await provisioner.provision( work_pool_name="test-work-pool", base_job_template={ "variables": { "type": "object", "properties": { "vpc_id": {}, "cluster": {}, "aws_credentials": {}, "execution_role_arn": {}, }, } }, ) mock_confirm.ask.assert_not_called() async def test_provision_boto3_installed_interactive( self, provisioner, mock_console, mock_confirm ): mock_console.is_interactive = True mock_confirm.ask.side_effect = [False, False] provisioner.console.is_interactive = True provisioner.is_boto3_installed = MagicMock(return_value=True) result = await provisioner.provision("test-work-pool", {}) assert result == {} assert mock_confirm.ask.call_count == 2 expected_call_1 = call( ( "Would you like to customize the resource names for your" " infrastructure? This includes an IAM user, IAM policy, ECS cluster," " VPC, ECS security group, and ECR repository." 
), ) expected_call_2 = call("Proceed with infrastructure provisioning?", console=ANY) assert mock_confirm.ask.call_args_list[0] == expected_call_1 assert mock_confirm.ask.call_args_list[1] == expected_call_2 @pytest.mark.usefixtures("register_block_types", "no_default_vpc") async def test_provision_interactive_with_default_names( self, provisioner, mock_confirm, prefect_client, mock_run_process, capsys ): provisioner.console.is_interactive = True mock_confirm.ask.side_effect = [ False, True, ] # do not customize, proceed with provisioning result = await provisioner.provision( "test-work-pool", { "variables": { "type": "object", "properties": { "vpc_id": {}, "cluster": {}, "aws_credentials": {}, "execution_role_arn": {}, }, } }, ) ec2 = boto3.resource("ec2") vpc = ec2.Vpc(result["variables"]["properties"]["vpc_id"]["default"]) assert vpc is not None ecs = boto3.client("ecs") clusters = ecs.list_clusters()["clusterArns"] assert ( f"arn:aws:ecs:us-east-1:123456789012:cluster/{result['variables']['properties']['cluster']['default']}" in clusters ) block_document = await prefect_client.read_block_document_by_name( "test-work-pool-aws-credentials", "aws-credentials" ) assert result["variables"]["properties"]["aws_credentials"] == { "default": {"$ref": {"block_document_id": str(block_document.id)}}, } mock_run_process.assert_called_with( "docker login -u AWS -p 123456789012-auth-token" " https://123456789012.dkr.ecr.us-east-1.amazonaws.com" ) captured = capsys.readouterr() assert "Your default Docker build namespace has been set" in captured.out assert ( result["variables"]["properties"]["cluster"]["default"] == "prefect-ecs-cluster" ) @pytest.mark.usefixtures("register_block_types", "no_default_vpc") async def test_provision_interactive_with_custom_names( self, provisioner, mock_confirm, prefect_client, mock_run_process, capsys, monkeypatch, ): provisioner.console.is_interactive = True mock_confirm.ask.side_effect = [ True, # customize True, # proceed with provisioning ] 
def prompt_mocks(*args, **kwargs): if "Enter a name for the IAM user" in args[0]: return "custom-iam-user" elif "Enter a name for the IAM policy" in args[0]: return "custom-iam-policy" elif "Enter a name for the ECS cluster" in args[0]: return "custom-ecs-cluster" elif "Enter a name for the AWS credentials block" in args[0]: return "custom-aws-credentials" elif "Enter a name for the VPC" in args[0]: return "custom-vpc" elif "Enter a name for the ECS security group" in args[0]: return "custom-ecs-security-group" elif "Enter a name for the ECR repository" in args[0]: return "custom-ecr-repository" else: raise ValueError(f"Unexpected prompt: {args[0]}") mock_prompt = MagicMock(side_effect=prompt_mocks) monkeypatch.setattr("prefect.cli._prompts.prompt", mock_prompt) result = await provisioner.provision( "test-work-pool", { "variables": { "type": "object", "properties": { "vpc_id": {}, "cluster": {}, "aws_credentials": {}, "execution_role_arn": {}, }, } }, ) ec2 = boto3.resource("ec2") vpc = ec2.Vpc(result["variables"]["properties"]["vpc_id"]["default"]) assert vpc is not None ecs = boto3.client("ecs") clusters = ecs.list_clusters()["clusterArns"] assert ( f"arn:aws:ecs:us-east-1:123456789012:cluster/{result['variables']['properties']['cluster']['default']}" in clusters ) block_document = await prefect_client.read_block_document_by_name( "custom-aws-credentials", "aws-credentials" ) assert result["variables"]["properties"]["aws_credentials"] == { "default": {"$ref": {"block_document_id": str(block_document.id)}}, } mock_run_process.assert_called_with( "docker login -u AWS -p 123456789012-auth-token" " https://123456789012.dkr.ecr.us-east-1.amazonaws.com" ) captured = capsys.readouterr() assert "Your default Docker build namespace has been set" in captured.out assert ( result["variables"]["properties"]["cluster"]["default"] == "custom-ecs-cluster" ) async def test_provision_interactive_reject_provisioning( self, provisioner, mock_confirm ): 
provisioner.console.is_interactive = True mock_confirm.ask.side_effect = [ False, False, ] # do not customize, do not proceed with provisioning original_base_template = { "variables": { "type": "object", "properties": { "vpc_id": {}, "cluster": {}, "aws_credentials": {}, "execution_role_arn": {}, }, } } unchanged_base_job_template = await provisioner.provision( "test-work-pool", original_base_template ) assert unchanged_base_job_template == original_base_template @pytest.mark.usefixtures( "register_block_types", "no_default_vpc", "mock_run_process" ) async def test_provision_idempotent(self, provisioner, mock_confirm): provisioner.console.is_interactive = True mock_confirm.ask.side_effect = [ False, True, False, True, ] # do not customize, proceed with provisioning (for each) result_1 = await provisioner.provision( "test-work-pool", { "variables": { "type": "object", "properties": { "vpc_id": {}, "cluster": {}, "aws_credentials": {}, "execution_role_arn": {}, }, } }, ) result_2 = await provisioner.provision( "test-work-pool", { "variables": { "type": "object", "properties": { "vpc_id": {}, "cluster": {}, "aws_credentials": {}, "execution_role_arn": {}, }, } }, ) assert result_1 == result_2 async def test_raises_runtime_error_on_failure(self, provisioner): """ Cause a failure by not registering the block types and ensure that a RuntimeError is raised. """ with pytest.raises( RuntimeError, match=r'Unable to find block type "aws-credentials"' ): await provisioner.provision( "test-work-pool", { "variables": { "type": "object", "properties": { "vpc_id": {}, "cluster": {}, "aws_credentials": {}, "execution_role_arn": {}, }, } }, ) def test_resolve_provisoner(): assert isinstance( get_infrastructure_provisioner_for_work_pool_type("ecs:push"), ElasticContainerServicePushProvisioner, ) @pytest.fixture def execution_role_resource(): return ExecutionRoleResource(execution_role_name="PrefectEcsTaskExecutionRole")
TestElasticContainerServicePushProvisioner
python
walkccc__LeetCode
solutions/3084. Count Substrings Starting and Ending with Given Character/3084.py
{ "start": 0, "end": 124 }
class ____: def countSubstrings(self, s: str, c: str) -> int: freq = s.count(c) return freq * (freq + 1) // 2
Solution
python
numpy__numpy
numpy/_core/tests/test_multiarray.py
{ "start": 219394, "end": 221804 }
class ____: def tst_basic(self, x): ind = list(range(x.shape[0])) assert_array_equal(x.take(ind, axis=0), x) def test_ip_types(self): unchecked_types = [bytes, str, np.void] x = np.random.random(24) * 100 x = x.reshape((2, 3, 4)) for types in np._core.sctypes.values(): for T in types: if T not in unchecked_types: self.tst_basic(x.copy().astype(T)) # Also test string of a length which uses an untypical length self.tst_basic(x.astype("S3")) def test_raise(self): x = np.random.random(24) * 100 x = x.reshape((2, 3, 4)) assert_raises(IndexError, x.take, [0, 1, 2], axis=0) assert_raises(IndexError, x.take, [-3], axis=0) assert_array_equal(x.take([-1], axis=0)[0], x[1]) def test_clip(self): x = np.random.random(24) * 100 x = x.reshape((2, 3, 4)) assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0]) assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1]) def test_wrap(self): x = np.random.random(24) * 100 x = x.reshape((2, 3, 4)) assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1]) assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0]) assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1]) @pytest.mark.parametrize('dtype', ('>i4', '<i4')) def test_byteorder(self, dtype): x = np.array([1, 2, 3], dtype) assert_array_equal(x.take([0, 2, 1]), [1, 3, 2]) def test_record_array(self): # Note mixed byteorder. rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')]) rec1 = rec.take([1]) assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0) def test_out_overlap(self): # gh-6272 check overlap on out x = np.arange(5) y = np.take(x, [1, 2, 3], out=x[2:5], mode='wrap') assert_equal(y, np.array([1, 2, 3])) @pytest.mark.parametrize('shape', [(1, 2), (1,), ()]) def test_ret_is_out(self, shape): # 0d arrays should not be an exception to this rule x = np.arange(5) inds = np.zeros(shape, dtype=np.intp) out = np.zeros(shape, dtype=x.dtype) ret = np.take(x, inds, out=out) assert ret is out
TestTake
python
eriklindernoren__ML-From-Scratch
mlfromscratch/deep_learning/activation_functions.py
{ "start": 1344, "end": 1845 }
class ____(): # Reference : https://arxiv.org/abs/1706.02515, # https://github.com/bioinf-jku/SNNs/blob/master/SelfNormalizingNetworks_MLP_MNIST.ipynb def __init__(self): self.alpha = 1.6732632423543772848170429916717 self.scale = 1.0507009873554804934193349852946 def __call__(self, x): return self.scale * np.where(x >= 0.0, x, self.alpha*(np.exp(x)-1)) def gradient(self, x): return self.scale * np.where(x >= 0.0, 1, self.alpha * np.exp(x))
SELU
python
doocs__leetcode
solution/3000-3099/3085.Minimum Deletions to Make String K-Special/Solution.py
{ "start": 0, "end": 392 }
class ____: def minimumDeletions(self, word: str, k: int) -> int: def f(v: int) -> int: ans = 0 for x in nums: if x < v: ans += x elif x > v + k: ans += x - v - k return ans nums = Counter(word).values() return min(f(v) for v in range(len(word) + 1))
Solution
python
getsentry__sentry
src/sentry/relay/config/__init__.py
{ "start": 42724, "end": 46260 }
class ____: """ Base class for configuration objects Offers a readonly configuration class that can be serialized to json and viewed as a simple dictionary >>> x = _ConfigBase( a= 1, b="The b", c= _ConfigBase(x=33, y = _ConfigBase(m=3.14159 , w=[1,2,3], z={'t':1}))) >>> x.a 1 >>> x.b 'The b' >>> x.something is None # accessing non-existing elements True >>> x.c.y.w [1, 2, 3] """ def __init__(self, **kwargs: Any) -> None: data: MutableMapping[str, Any] = {} object.__setattr__(self, "data", data) for key, val in kwargs.items(): if val is not None: data[key] = val def __setattr__(self, key: str, value: Any) -> None: raise Exception("Trying to change read only ProjectConfig object") def __getattr__(self, name: str) -> Any | Mapping[str, Any]: data = self.__get_data() return data.get(to_camel_case_name(name)) def to_dict(self) -> MutableMapping[str, Any]: """ Converts the config object into a dictionary :return: A dictionary containing the object properties, with config properties also converted in dictionaries >>> x = _ConfigBase( a= 1, b="The b", c= _ConfigBase(x=33, y = _ConfigBase(m=3.14159 , w=[1,2,3], z={'t':1}))) >>> x.to_dict() == {'a': 1, 'c': {'y': {'m': 3.14159, 'w': [1, 2, 3], 'z':{'t': 1}}, 'x': 33}, 'b': 'The b'} True """ data = self.__get_data() return { key: value.to_dict() if isinstance(value, _ConfigBase) else value for (key, value) in data.items() } def to_json_string(self) -> Any: """ >>> x = _ConfigBase( a = _ConfigBase(b = _ConfigBase( w=[1,2,3]))) >>> x.to_json_string() '{"a": {"b": {"w": [1, 2, 3]}}}' :return: """ data = self.to_dict() return utils.json.dumps(data) def get_at_path(self, *args: str) -> Any: """ Gets an element at the specified path returning None if the element or the path doesn't exists :param args: the path to follow ( a list of strings) :return: the element if present at specified path or None otherwise) >>> x = _ConfigBase( a= 1, b="The b", c= _ConfigBase(x=33, y = _ConfigBase(m=3.14159 , w=[1,2,3], z={'t':1}))) >>> 
x.get_at_path('c','y','m') 3.14159 >>> x.get_at_path('bb') is None # property not set True >>> x.get_at_path('a', 'something') is None # trying to go past existing Config paths True >>> x.get_at_path('c','y','z') {'t': 1} >>> x.get_at_path('c','y','z','t') is None # only navigates in ConfigBase does not try to go into normal dicts. True """ if len(args) == 0: return self data = self.__get_data() val = data.get(args[0]) if len(args) == 1: return val if isinstance(val, _ConfigBase): return val.get_at_path(*args[1:]) return None # property not set or path goes beyond the Config defined valid path def __get_data(self) -> Mapping[str, Any]: return object.__getattribute__(self, "data") def __str__(self) -> str: try: return utils.json.dumps(self.to_dict(), sort_keys=True) # type: ignore[arg-type] except Exception as e: return f"Content Error:{e}" def __repr__(self) -> str: return f"({self.__class__.__name__}){self}"
_ConfigBase
python
indygreg__python-build-standalone
src/github_api_tester.py
{ "start": 3943, "end": 4474 }
class ____: name: str label: str | None def __post_init__(self): self.hasher = hashlib.sha256() if self.name == "SHA256SUMS": self.contents = b"" else: self.contents = None def update(self, chunk: bytes) -> None: self.hasher.update(chunk) if self.contents is not None: self.contents += chunk def to_asset(self) -> Asset: return Asset(self.name, self.label, self.hasher.hexdigest(), self.contents) @dataclasses.dataclass
Upload
python
django__django
django/contrib/auth/mixins.py
{ "start": 293, "end": 2327 }
class ____: """ Abstract CBV mixin that gives access mixins the same customizable functionality. """ login_url = None permission_denied_message = "" raise_exception = False redirect_field_name = REDIRECT_FIELD_NAME def get_login_url(self): """ Override this method to override the login_url attribute. """ login_url = self.login_url or settings.LOGIN_URL if not login_url: raise ImproperlyConfigured( f"{self.__class__.__name__} is missing the login_url attribute. Define " f"{self.__class__.__name__}.login_url, settings.LOGIN_URL, or override " f"{self.__class__.__name__}.get_login_url()." ) return str(login_url) def get_permission_denied_message(self): """ Override this method to override the permission_denied_message attribute. """ return self.permission_denied_message def get_redirect_field_name(self): """ Override this method to override the redirect_field_name attribute. """ return self.redirect_field_name def handle_no_permission(self): if self.raise_exception or self.request.user.is_authenticated: raise PermissionDenied(self.get_permission_denied_message()) path = self.request.build_absolute_uri() resolved_login_url = resolve_url(self.get_login_url()) # If the login url is the same scheme and net location then use the # path as the "next" url. login_scheme, login_netloc = urlsplit(resolved_login_url)[:2] current_scheme, current_netloc = urlsplit(path)[:2] if (not login_scheme or login_scheme == current_scheme) and ( not login_netloc or login_netloc == current_netloc ): path = self.request.get_full_path() return redirect_to_login( path, resolved_login_url, self.get_redirect_field_name(), )
AccessMixin
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 1175466, "end": 1178592 }
class ____(VegaLiteSchema): """ SelectionParameter schema wrapper. Parameters ---------- name : str, :class:`ParameterName` Required. A unique name for the selection parameter. Selection names should be valid JavaScript identifiers: they should contain only alphanumeric characters (or "$", or "_") and may not start with a digit. Reserved keywords that may not be used as parameter names are "datum", "event", "item", and "parent". select : dict, :class:`SelectionType`, Literal['point', 'interval'], :class:`PointSelectionConfig`, :class:`IntervalSelectionConfig` Determines the default event processing and data query for the selection. Vega-Lite currently supports two selection types: * ``"point"`` -- to select multiple discrete data values; the first value is selected on ``click`` and additional values toggled on shift-click. * ``"interval"`` -- to select a continuous range of data values on ``drag``. bind : dict, :class:`Binding`, :class:`BindInput`, :class:`BindRange`, :class:`BindDirect`, :class:`BindCheckbox`, :class:`LegendBinding`, :class:`BindRadioSelect`, Literal['legend', 'scales'], :class:`LegendStreamBinding` When set, a selection is populated by input elements (also known as dynamic query widgets) or by interacting with the corresponding legend. Direct manipulation interaction is disabled by default; to re-enable it, set the selection's `on <https://vega.github.io/vega-lite/docs/selection.html#common-selection-properties>`__ property. Legend bindings are restricted to selections that only specify a single field or encoding. Query widget binding takes the form of Vega's `input element binding definition <https://vega.github.io/vega/docs/signals/#bind>`__ or can be a mapping between projected field/encodings and binding definitions. **See also:** `bind <https://vega.github.io/vega-lite/docs/bind.html>`__ documentation. 
value : str, bool, dict, float, :class:`DateTime`, :class:`SelectionInit`, :class:`PrimitiveValue`, :class:`SelectionInitIntervalMapping`, Sequence[dict, :class:`SelectionInitMapping`], None Initialize the selection with a mapping between `projected channels or field names <https://vega.github.io/vega-lite/docs/selection.html#project>`__ and initial values. **See also:** `init <https://vega.github.io/vega-lite/docs/value.html>`__ documentation. """ _schema = {"$ref": "#/definitions/SelectionParameter"} def __init__( self, name: Optional[str | SchemaBase] = Undefined, select: Optional[SchemaBase | Map | SelectionType_T] = Undefined, bind: Optional[SchemaBase | Literal["legend", "scales"] | Map] = Undefined, value: Optional[ Temporal | SchemaBase | Sequence[SchemaBase | Map] | Map | PrimitiveValue_T ] = Undefined, **kwds, ): super().__init__(name=name, select=select, bind=bind, value=value, **kwds)
SelectionParameter
python
apache__airflow
airflow-core/tests/unit/triggers/test_callback.py
{ "start": 1742, "end": 5124 }
class ____: @pytest.fixture def mock_import_string(self): with mock.patch("airflow.triggers.callback.import_string") as m: yield m @pytest.mark.parametrize( ("callback_init_kwargs", "expected_serialized_kwargs"), [ pytest.param(None, {}, id="no kwargs"), pytest.param(TEST_CALLBACK_KWARGS, TEST_CALLBACK_KWARGS, id="non-empty kwargs"), ], ) def test_serialization(self, callback_init_kwargs, expected_serialized_kwargs): trigger = CallbackTrigger( callback_path=TEST_CALLBACK_PATH, callback_kwargs=callback_init_kwargs, ) classpath, kwargs = trigger.serialize() assert classpath == "airflow.triggers.callback.CallbackTrigger" assert kwargs == { "callback_path": TEST_CALLBACK_PATH, "callback_kwargs": expected_serialized_kwargs, } @pytest.mark.asyncio async def test_run_success_with_async_function(self, mock_import_string): """Test trigger handles async functions correctly.""" callback_return_value = "some value" mock_callback = mock.AsyncMock(return_value=callback_return_value) mock_import_string.return_value = mock_callback trigger_gen = TEST_TRIGGER.run() running_event = await anext(trigger_gen) assert running_event.payload[PAYLOAD_STATUS_KEY] == CallbackState.RUNNING success_event = await anext(trigger_gen) mock_import_string.assert_called_once_with(TEST_CALLBACK_PATH) mock_callback.assert_called_once_with(**TEST_CALLBACK_KWARGS) assert success_event.payload[PAYLOAD_STATUS_KEY] == CallbackState.SUCCESS assert success_event.payload[PAYLOAD_BODY_KEY] == callback_return_value @pytest.mark.asyncio async def test_run_success_with_notifier(self, mock_import_string): """Test trigger handles async notifier classes correctly.""" mock_import_string.return_value = ExampleAsyncNotifier trigger_gen = TEST_TRIGGER.run() running_event = await anext(trigger_gen) assert running_event.payload[PAYLOAD_STATUS_KEY] == CallbackState.RUNNING success_event = await anext(trigger_gen) mock_import_string.assert_called_once_with(TEST_CALLBACK_PATH) assert success_event.payload[PAYLOAD_STATUS_KEY] 
== CallbackState.SUCCESS assert ( success_event.payload[PAYLOAD_BODY_KEY] == f"Async notification: {TEST_MESSAGE}, context: {{'dag_run': 'test'}}" ) @pytest.mark.asyncio async def test_run_failure(self, mock_import_string): exc_msg = "Something went wrong" mock_callback = mock.AsyncMock(side_effect=RuntimeError(exc_msg)) mock_import_string.return_value = mock_callback trigger_gen = TEST_TRIGGER.run() running_event = await anext(trigger_gen) assert running_event.payload[PAYLOAD_STATUS_KEY] == CallbackState.RUNNING failure_event = await anext(trigger_gen) mock_import_string.assert_called_once_with(TEST_CALLBACK_PATH) mock_callback.assert_called_once_with(**TEST_CALLBACK_KWARGS) assert failure_event.payload[PAYLOAD_STATUS_KEY] == CallbackState.FAILED assert all(s in failure_event.payload[PAYLOAD_BODY_KEY] for s in ["raise", "RuntimeError", exc_msg])
TestCallbackTrigger
python
Netflix__metaflow
metaflow/user_decorators/mutable_step.py
{ "start": 399, "end": 16791 }
class ____: IGNORE = 1 ERROR = 2 OVERRIDE = 3 def __init__( self, flow_spec: "metaflow.flowspec.FlowSpec", step: Union[ Callable[["metaflow.decorators.FlowSpecDerived"], None], Callable[["metaflow.decorators.FlowSpecDerived", Any], None], ], pre_mutate: bool = False, statically_defined: bool = False, inserted_by: Optional[str] = None, ): from .mutable_flow import MutableFlow self._mutable_container = MutableFlow( flow_spec, pre_mutate=pre_mutate, statically_defined=statically_defined, inserted_by=inserted_by, ) self._flow_cls = flow_spec.__class__ self._my_step = step self._pre_mutate = pre_mutate self._statically_defined = statically_defined self._inserted_by = inserted_by if self._inserted_by is None: # This is an error because MutableSteps should only be created by # StepMutators or FlowMutators. We need to catch it now because otherwise # we may put stuff on the command line (with --with) that would get added # twice and weird behavior may ensue. raise MetaflowException( "MutableStep should only be created by StepMutators or FlowMutators. " "This is an internal error." ) @property def flow(self) -> "metaflow.user_decorator.mutable_flow.MutableFlow": """ The flow that contains this step Returns ------- MutableFlow The flow that contains this step """ return self._mutable_container @property def decorator_specs( self, ) -> Generator[Tuple[str, str, List[Any], Dict[str, Any]], None, None]: """ Iterate over all the decorator specifications of this step. Note that the same type of decorator may be present multiple times and no order is guaranteed. 
The returned tuple contains: - The decorator's name (shortest possible) - The decorator's fully qualified name (in the case of Metaflow decorators, this will indicate which extension the decorator comes from) - A list of positional arguments - A dictionary of keyword arguments You can use the resulting tuple to remove a decorator for example Yields ------ str, str, List[Any], Dict[str, Any] A tuple containing the decorator name, it's fully qualified name, a list of positional arguments, and a dictionary of keyword arguments. """ for deco in self._my_step.decorators: # 3.7 does not support yield foo, *bar syntax so we # work around r = [ deco.name, "%s.%s" % ( deco.__class__.__module__, deco.__class__.__name__, ), ] r.extend(deco.get_args_kwargs()) yield tuple(r) for deco in self._my_step.wrappers: r = [ UserStepDecoratorBase.get_decorator_name(deco.__class__), deco.decorator_name, ] r.extend(deco.get_args_kwargs()) yield tuple(r) for deco in self._my_step.config_decorators: r = [ UserStepDecoratorBase.get_decorator_name(deco.__class__), deco.decorator_name, ] r.extend(deco.get_args_kwargs()) yield tuple(r) def add_decorator( self, deco_type: Union[partial, UserStepDecoratorBase, str], deco_args: Optional[List[Any]] = None, deco_kwargs: Optional[Dict[str, Any]] = None, duplicates: int = IGNORE, ) -> None: """ Add a Metaflow step-decorator or a user step-decorator to a step. You can either add the decorator itself or its decorator specification for it (the same you would get back from decorator_specs). You can also mix and match but you cannot provide arguments both through the string and the deco_args/deco_kwargs. As an example: ``` from metaflow import environment ... 
my_step.add_decorator(environment, deco_kwargs={"vars": {"foo": 42})} ``` is equivalent to: ``` my_step.add_decorator('environment:vars={"foo": 42}') ``` is equivalent to: ``` my_step.add_decorator('environment', deco_kwargs={"vars":{"foo": 42}}) ``` but this is not allowed: ``` my_step.add_decorator('environment:vars={"bar" 43}', deco_kwargs={"vars":{"foo": 42}}) ``` Note in the case where you specify a string for the decorator, there is no need to import the decorator. The string syntax is useful to, for example, allow decorators to be stored as strings in a configuration file. You can only add StepMutators in the pre_mutate stage. In terms of precedence for decorators: - if a decorator can be applied multiple times (like `@card`) all decorators added are kept. - if `duplicates` is set to `MutableStep.IGNORE`, then the decorator being added is ignored (in other words, the existing decorator has precedence). - if `duplicates` is set to `MutableStep.OVERRIDE`, then the *existing* decorator is removed and this newly added one replaces it (in other words, the newly added decorator has precedence). - if `duplicates` is set to `MutableStep.ERROR`, then an error is raised but only if the newly added decorator is *static* (ie: defined directly in the code). If not, it is ignored. Parameters ---------- deco_type : Union[partial, UserStepDecoratorBase, str] The decorator class to add to this step. deco_args : List[Any], optional, default None Positional arguments to pass to the decorator. deco_kwargs : Dict[str, Any], optional, default None Keyword arguments to pass to the decorator. duplicates : int, default MutableStep.IGNORE Instruction on how to handle duplicates. It can be one of: - `MutableStep.IGNORE`: Ignore the decorator if it already exists. - `MutableStep.ERROR`: Raise an error if the decorator already exists. - `MutableStep.OVERRIDE`: Remove the existing decorator and add this one. 
""" # Prevent circular import from metaflow.decorators import ( DuplicateStepDecoratorException, StepDecorator, extract_step_decorator_from_decospec, ) deco_args = deco_args or [] deco_kwargs = deco_kwargs or {} def _add_step_decorator(step_deco): if deco_args: raise MetaflowException( "Step decorators do not take additional positional arguments" ) # Update kwargs: step_deco.attributes.update(deco_kwargs) # Check duplicates def _do_add(): step_deco.statically_defined = self._statically_defined step_deco.inserted_by = self._inserted_by self._my_step.decorators.append(step_deco) debug.userconf_exec( "Mutable step adding step decorator '%s' to step '%s'" % (deco_type, self._my_step.name) ) existing_deco = [ d for d in self._my_step.decorators if d.name == step_deco.name ] if step_deco.allow_multiple or not existing_deco: _do_add() elif duplicates == MutableStep.IGNORE: # If we ignore, we do not add the decorator debug.userconf_exec( "Mutable step ignoring step decorator '%s' on step '%s' " "(already exists and duplicates are ignored)" % (step_deco.name, self._my_step.name) ) elif duplicates == MutableStep.OVERRIDE: # If we override, we remove the existing decorator and add this one debug.userconf_exec( "Mutable step overriding step decorator '%s' on step '%s' " "(removing existing decorator and adding new one)" % (step_deco.name, self._my_step.name) ) self._my_step.decorators = [ d for d in self._my_step.decorators if d.name != step_deco.name ] _do_add() elif duplicates == MutableStep.ERROR: # If we error, we raise an exception if self._statically_defined: raise DuplicateStepDecoratorException(step_deco.name, self._my_step) else: debug.userconf_exec( "Mutable step ignoring step decorator '%s' on step '%s' " "(already exists and non statically defined)" % (step_deco.name, self._my_step.name) ) else: raise ValueError("Invalid duplicates value: %s" % duplicates) if isinstance(deco_type, str): step_deco, has_args_kwargs = extract_step_decorator_from_decospec(deco_type) if 
(deco_args or deco_kwargs) and has_args_kwargs: raise MetaflowException( "Cannot specify additional arguments when adding a user step " "decorator using a decospec that already has arguments" ) if isinstance(step_deco, StepDecorator): _add_step_decorator(step_deco) else: # User defined decorator. if not self._pre_mutate and isinstance(step_deco, StepMutator): raise MetaflowException( "Adding step mutator '%s' from %s is only allowed in the " "`pre_mutate` method and not the `mutate` method" % (step_deco.decorator_name, self._inserted_by) ) if deco_args or deco_kwargs: # We need to recreate the object if there were args or kwargs # since they were not in the string step_deco = step_deco.__class__(*deco_args, **deco_kwargs) step_deco.add_or_raise( self._my_step, self._statically_defined, duplicates, self._inserted_by, ) return if isinstance(deco_type, type) and issubclass(deco_type, UserStepDecoratorBase): # We can only add step mutators in the pre mutate stage. if not self._pre_mutate and issubclass(deco_type, StepMutator): raise MetaflowException( "Adding step mutator '%s' from %s is only allowed in the " "`pre_mutate` method and not the `mutate` method" % (step_deco.decorator_name, self._inserted_by) ) debug.userconf_exec( "Mutable step adding decorator %s to step %s" % (deco_type, self._my_step.name) ) d = deco_type(*deco_args, **deco_kwargs) # add_or_raise properly registers the decorator d.add_or_raise( self._my_step, self._statically_defined, duplicates, self._inserted_by ) return # At this point, it should be a regular Metaflow step decorator if ( not isinstance(deco_type, partial) or len(deco_type.args) != 1 or not issubclass(deco_type.args[0], StepDecorator) ): raise TypeError( "add_decorator takes a metaflow decorator or user StepDecorator" ) deco_type = deco_type.args[0] _add_step_decorator( deco_type( attributes=deco_kwargs, statically_defined=self._statically_defined, inserted_by=self._inserted_by, ) ) def remove_decorator( self, deco_name: str, 
deco_args: Optional[List[Any]] = None, deco_kwargs: Optional[Dict[str, Any]] = None, ) -> bool: """ Remove a step-level decorator. To remove a decorator, you can pass the decorator specification (obtained from `decorator_specs` for example). Note that if multiple decorators share the same decorator specification (very rare), they will all be removed. You can only remove StepMutators in the `pre_mutate` method. Parameters ---------- deco_name : str Decorator specification of the decorator to remove. If nothing else is specified, all decorators matching that name will be removed. deco_args : List[Any], optional, default None Positional arguments to match the decorator specification. deco_kwargs : Dict[str, Any], optional, default None Keyword arguments to match the decorator specification. Returns ------- bool Returns True if a decorator was removed. """ do_all = deco_args is None and deco_kwargs is None did_remove = False canonical_deco_type = UserStepDecoratorBase.get_decorator_by_name(deco_name) if issubclass(canonical_deco_type, UserStepDecoratorBase): for attr in ["config_decorators", "wrappers"]: new_deco_list = [] for deco in getattr(self._my_step, attr): if deco.decorator_name == canonical_deco_type.decorator_name: if do_all: continue # We remove all decorators with this name if deco.get_args_kwargs() == ( deco_args or [], deco_kwargs or {}, ): if not self._pre_mutate and isinstance(deco, StepMutator): raise MetaflowException( "Removing step mutator '%s' from %s is only allowed in the " "`pre_mutate` method and not the `mutate` method" % (deco.decorator_name, self._inserted_by) ) did_remove = True debug.userconf_exec( "Mutable step removing user step decorator '%s' from step '%s'" % (deco.decorator_name, self._my_step.name) ) else: new_deco_list.append(deco) else: new_deco_list.append(deco) setattr(self._my_step, attr, new_deco_list) if did_remove: return True new_deco_list = [] for deco in self._my_step.decorators: if deco.name == deco_name: if do_all: 
continue # We remove all decorators with this name # Check if the decorator specification matches if deco.get_args_kwargs() == (deco_args, deco_kwargs): did_remove = True debug.userconf_exec( "Mutable step removing step decorator '%s' from step '%s'" % (deco.name, self._my_step.name) ) else: new_deco_list.append(deco) else: new_deco_list.append(deco) self._my_step.decorators = new_deco_list if did_remove: return True debug.userconf_exec( "Mutable step did not find decorator '%s' to remove from step '%s'" % (deco_name, self._my_step.name) ) return False
MutableStep
python
pytorch__pytorch
torch/_inductor/codegen/common.py
{ "start": 30542, "end": 37762 }
class ____(BasicMathOpsMixin, OpDecompositions, OpsHandler[Any]): @staticmethod def paren(string: OpVarT) -> OpVarT: if ( isinstance(string, CSEVariable) or _RE_PAREN_NOT_NEEDED.fullmatch(string) or _all_in_parens(string) ): # don't put extra parens for strings that are already wrapped in parens # pyrefly: ignore [bad-return] return string return f"({string})" @staticmethod def constant(value: Union[bool, float, int], dtype: torch.dtype) -> OpVarT: return repr(value) @staticmethod def bitwise_not(x: OpVarT) -> OpVarT: return f"~{OpOverrides.paren(x)}" @staticmethod def logical_not(a: OpVarT) -> OpVarT: return f"{OpOverrides.paren(a)} == 0" @staticmethod def bitwise_and(x: OpVarT, y: OpVarT) -> OpVarT: return f"{OpOverrides.paren(x)} & {OpOverrides.paren(y)}" @staticmethod def bitwise_or(x: OpVarT, y: OpVarT) -> OpVarT: return f"{OpOverrides.paren(x)} | {OpOverrides.paren(y)}" @staticmethod def bitwise_xor(x: OpVarT, y: OpVarT) -> OpVarT: return f"{OpOverrides.paren(x)} ^ {OpOverrides.paren(y)}" @staticmethod def bitwise_left_shift(x: OpVarT, y: OpVarT) -> OpVarT: return f"{OpOverrides.paren(x)} << {OpOverrides.paren(y)}" @staticmethod def bitwise_right_shift(x: OpVarT, y: OpVarT) -> OpVarT: return f"{OpOverrides.paren(x)} >> {OpOverrides.paren(y)}" @staticmethod def int_truediv(a: OpVarT, b: OpVarT) -> OpVarT: # TODO: this is wrong # TODO: an easy bandaid is to generate runtime asserts that it's # <= 2**53, which is when this equation is correct return ops.truediv(a, b) @staticmethod def load_seed(name: str, offset: OpVarT) -> OpVarT: return ops.load(name, sympy.Integer(offset)) def indirect_indexing( self, var: OpVarT, size: Union[sympy.Expr, int], check: bool = True, wrap_neg: bool = True, ) -> sympy.Symbol: return sympy_index_symbol(str(var)) def check_bounds( self, expr: sympy.Expr, size: sympy.Expr, lower: bool, upper: bool ) -> None: raise NotImplementedError( f"{type(self).__name__}: check_bounds should be handled by CSEProxy" ) def load(self, name: str, 
index: sympy.Expr) -> OpVarT: raise NotImplementedError( f"{type(self).__name__}: load should be handled by CSEProxy" ) def store( self, name: str, index: sympy.Expr, value: OpVarT, mode: StoreMode = None ) -> None: raise NotImplementedError( f"{type(self).__name__}: store should be handled by CSEProxy" ) def device_assert_async(self, cond: CSEVariable, msg: str) -> None: raise NotImplementedError( f"{type(self).__name__}: device_assert_async should be handled by CSEProxy" ) def store_reduction(self, name: str, index: sympy.Expr, value: OpVarT) -> None: raise NotImplementedError( f"{type(self).__name__}: store_reduction should be handled by CSEProxy" ) def reduction( self, dtype: torch.dtype, src_dtype: torch.dtype, reduction_type: ReductionType, value: Union[OpVarT, tuple[OpVarT, ...]], ) -> Union[OpVarT, tuple[OpVarT, ...]]: raise NotImplementedError( f"{type(self).__name__}: reduction should be handled by CSEProxy" ) def scan( self, dtypes: tuple[torch.dtype, ...], combine_fn: Callable[ [tuple[OpVarT, ...], tuple[OpVarT, ...]], tuple[OpVarT, ...], ], values: tuple[OpVarT, ...], ) -> tuple[OpVarT, ...]: raise NotImplementedError( f"{type(self).__name__}: scan should be handled by CSEProxy" ) def sort( self, dtypes: tuple[torch.dtype, ...], values: tuple[OpVarT, ...], stable: bool, descending: bool, ) -> tuple[OpVarT, ...]: raise NotImplementedError( f"{type(self).__name__}: sort should be handled by CSEProxy" ) def bucketize( self, values: OpVarT, boundaries: tuple[str, sympy.Expr, sympy.Expr, sympy.Expr], boundary_indices: OpVarT, indexing_dtype: torch.dtype, right: bool, sorter: Optional[tuple[str, sympy.Expr]] = None, sorter_indices: Optional[OpVarT] = None, ) -> OpVarT: raise NotImplementedError( f"{type(self).__name__}: bucketize should be handled by CSEProxy" ) def halide_clamp(self, value: OpVarT, size: sympy.Expr, check: bool) -> OpVarT: raise NotImplementedError( f"{type(self).__name__}: halide_clamp only implemented for Halide backend" ) def dot(self, 
x: OpVarT, y: OpVarT) -> OpVarT: raise NotImplementedError( f"{type(self).__name__}: dot only implemented for Triton backend" ) def inline_asm_elementwise( self, *inputs: OpVarT, asm: str, constraints: Optional[str] = None, dtype: torch.dtype = torch.float32, is_pure: bool = True, pack: int = 1, ) -> OpVarT: raise NotImplementedError( f"{type(self).__name__}: inline_asm_elementwise only implemented for Triton backend" ) def output(self, *args: OpVarT) -> None: raise AssertionError( f"{type(self).__name__}: ops.output should not appear at codegen time" ) def placeholder(self, index: int) -> OpVarT: raise AssertionError( f"{type(self).__name__}: ops.placeholder should not appear at codegen time" ) @staticmethod def _unimplemented(name: str) -> Callable[..., OpVarT]: def unimplemented(self: OpOverrides, *args: Any, **kwargs: Any) -> OpVarT: raise NotImplementedError( f"{type(self).__name__} does not implement ops.{name}" ) unimplemented.__name__ = name unimplemented.is_unimplemented = True # type: ignore[attr-defined] return unimplemented @classmethod def _is_unimplemented(cls, name: str) -> bool: fn = getattr(cls, name, None) default_fn = getattr(OpsHandler, name, None) return not fn or fn == default_fn or getattr(fn, "is_unimplemented", False) @classmethod def _initialize_pointwise_overrides(cls, target: str) -> None: assert target in ("triton", "cpp", "cppvec", "halide", "mps"), target for funcname, data in pointwise_overrides_data.items(): impl = getattr(data, target) if impl is None: if cls._is_unimplemented(funcname): setattr(cls, funcname, cls._unimplemented(funcname)) else: assert funcname not in cls.__dict__, ( f"multiple definitions of {funcname} on {cls.__name__}" ) impl.__name__ = funcname setattr(cls, funcname, staticmethod(impl)) @dataclasses.dataclass
OpOverrides
python
huggingface__transformers
src/transformers/models/deformable_detr/modeling_deformable_detr.py
{ "start": 8491, "end": 12692 }
class ____(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~DeformableDetrProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Initial reference points sent through the Transformer decoder. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). 
intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked intermediate reference points (reference points of each layer of the decoder). enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are picked as region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and background). enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Logits of predicted bounding boxes coordinates in the first stage. """ loss: Optional[torch.FloatTensor] = None loss_dict: Optional[dict] = None logits: Optional[torch.FloatTensor] = None pred_boxes: Optional[torch.FloatTensor] = None auxiliary_outputs: Optional[list[dict]] = None init_reference_points: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None intermediate_hidden_states: Optional[torch.FloatTensor] = None intermediate_reference_points: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None decoder_attentions: Optional[tuple[torch.FloatTensor]] = None cross_attentions: Optional[tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None encoder_attentions: Optional[tuple[torch.FloatTensor]] = None enc_outputs_class: Any = None enc_outputs_coord_logits: Optional[torch.FloatTensor] = None def inverse_sigmoid(x, eps=1e-5): x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - x).clamp(min=eps) return torch.log(x1 / x2) # Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d 
with Detr->DeformableDetr
DeformableDetrObjectDetectionOutput
python
fastapi__sqlmodel
docs_src/tutorial/fastapi/relationships/tutorial001.py
{ "start": 464, "end": 506 }
class ____(TeamBase): id: int
TeamPublic
python
PrefectHQ__prefect
src/integrations/prefect-github/tests/test_repository.py
{ "start": 275, "end": 6610 }
class ____: async def test_subprocess_errors_are_surfaced(self): """Ensure that errors from GitHub are being surfaced to users.""" g = GitHubRepository(repository_url="incorrect-url-scheme") with pytest.raises( RuntimeError, match="fatal: repository 'incorrect-url-scheme' does not exist", ): await g.get_directory() async def test_repository_default(self, monkeypatch): """Ensure that default command is 'git clone <repo name>' when given just a repo. """ class p: returncode = 0 mock = AsyncMock(return_value=p()) monkeypatch.setattr(prefect_github.repository, "run_process", mock) g = GitHubRepository(repository_url="prefect") await g.get_directory() assert mock.await_count == 1 assert ["git", "clone", "prefect"] == mock.await_args[0][0][:3] async def test_reference_default(self, monkeypatch): """Ensure that default command is 'git clone <repo name> -b <reference> --depth 1' # noqa: E501 when just a repository and reference are given. """ class p: returncode = 0 mock = AsyncMock(return_value=p()) monkeypatch.setattr(prefect_github.repository, "run_process", mock) g = GitHubRepository(repository_url="prefect", reference="2.0.0") await g.get_directory() assert mock.await_count == 1 assert "git clone prefect -b 2.0.0 --depth 1" in " ".join(mock.await_args[0][0]) async def test_token_added_correctly_from_credential(self, monkeypatch): """Ensure that the repo url is in the format `https://<oauth-key>@github.com/<username>/<repo>.git`.""" # noqa: E501 class p: returncode = 0 mock = AsyncMock(return_value=p()) monkeypatch.setattr(prefect_github.repository, "run_process", mock) credential = GitHubCredentials(token="XYZ") g = GitHubRepository( repository_url="https://github.com/PrefectHQ/prefect.git", credentials=credential, ) await g.get_directory() assert mock.await_count == 1 assert ( "git clone https://XYZ@github.com/PrefectHQ/prefect.git --depth 1" in " ".join(mock.await_args[0][0]) ) def setup_test_directory( self, tmp_src: str, sub_dir: str = "puppy" ) -> Tuple[str, 
str]: """Add files and directories to a temporary directory. Returns a tuple with the expected parent-level contents and the expected child-level contents. """ # add file to tmp_src f1_name = "dog.text" f1_path = Path(tmp_src) / f1_name f1 = open(f1_path, "w") f1.close() # add sub-directory to tmp_src sub_dir_path = Path(tmp_src) / sub_dir os.mkdir(sub_dir_path) # add file to sub-directory f2_name = "cat.txt" f2_path = sub_dir_path / f2_name f2 = open(f2_path, "w") f2.close() parent_contents = {f1_name, sub_dir} child_contents = {f2_name} assert set(os.listdir(tmp_src)) == parent_contents assert set(os.listdir(sub_dir_path)) == child_contents return parent_contents, child_contents class MockTmpDir: """Utility for having `TemporaryDirectory` return a known location.""" dir = None def __init__(self, *args, **kwargs): pass def __enter__(self): return self.dir def __exit__(self, *args, **kwargs): pass async def test_dir_contents_copied_correctly_with_get_directory(self, monkeypatch): # noqa """Check that `get_directory` is able to correctly copy contents from src->dst""" # noqa class p: returncode = 0 mock = AsyncMock(return_value=p()) monkeypatch.setattr(prefect_github.repository, "run_process", mock) sub_dir_name = "puppy" with TemporaryDirectory() as tmp_src: parent_contents, child_contents = self.setup_test_directory( tmp_src, sub_dir_name ) self.MockTmpDir.dir = tmp_src # move file contents to tmp_dst with TemporaryDirectory() as tmp_dst: monkeypatch.setattr( prefect_github.repository, "TemporaryDirectory", self.MockTmpDir, ) g = GitHubRepository( repository_url="https://github.com/PrefectHQ/prefect.git", ) await g.get_directory(local_path=tmp_dst) assert set(os.listdir(tmp_dst)) == parent_contents assert set(os.listdir(Path(tmp_dst) / sub_dir_name)) == child_contents async def test_dir_contents_copied_correctly_with_get_directory_and_from_path( self, monkeypatch ): # noqa """Check that `get_directory` is able to correctly copy contents from src->dst when 
`from_path` is included. It is expected that the directory specified by `from_path` will be moved to the specified destination, along with all of its contents. """ class p: returncode = 0 mock = AsyncMock(return_value=p()) monkeypatch.setattr(prefect_github.repository, "run_process", mock) sub_dir_name = "puppy" with TemporaryDirectory() as tmp_src: parent_contents, child_contents = self.setup_test_directory( tmp_src, sub_dir_name ) self.MockTmpDir.dir = tmp_src # move file contents to tmp_dst with TemporaryDirectory() as tmp_dst: monkeypatch.setattr( prefect_github.repository, "TemporaryDirectory", self.MockTmpDir, ) g = GitHubRepository( repository_url="https://github.com/PrefectHQ/prefect.git", ) await g.get_directory(local_path=tmp_dst, from_path=sub_dir_name) assert set(os.listdir(tmp_dst)) == set([sub_dir_name]) assert set(os.listdir(Path(tmp_dst) / sub_dir_name)) == child_contents
TestGitHubRepository
python
huggingface__transformers
tests/models/tvp/test_modeling_tvp.py
{ "start": 6404, "end": 9190 }
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as TVP does not use, inputs_embeds. The seq_length in TVP contain textual and visual inputs, and prompt. """ all_model_classes = (TvpModel, TvpForVideoGrounding) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": TvpModel, "temporal-video-grounding": TvpForVideoGrounding} if is_torch_available() else {} ) # TODO: Enable this once this model gets more usage def setUp(self): self.model_tester = TVPModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="TVP does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="TVPModel does not have input/output embeddings") def test_model_get_set_embeddings(self): pass @require_timm def test_backbone_selection(self): def _validate_backbone_init(): for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() # Confirm out_indices propagated to backbone if model.__class__.__name__ == "TvpModel": self.assertEqual(len(model.vision_model.backbone.out_indices), 2) elif model.__class__.__name__ == "TvpForVideoGrounding": self.assertEqual(len(model.model.vision_model.backbone.out_indices), 2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Force load_backbone path config.is_hybrid = False # We load through configs, as the modeling file assumes config.backbone_config is always set config.use_pretrained_backbone = False config.backbone_kwargs = None # Load a timm backbone # We hack adding hidden_sizes to the config to test the backbone loading backbone_config = TimmBackboneConfig("resnet18", out_indices=[-2, -1], hidden_sizes=[64, 128]) config.backbone_config = backbone_config _validate_backbone_init() # Load a HF backbone 
backbone_config = ResNetConfig.from_pretrained("facebook/dinov2-small", out_indices=[-2, -1]) config.backbone_config = backbone_config _validate_backbone_init() # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @require_torch @slow
TVPModelTest
python
sympy__sympy
sympy/stats/frv_types.py
{ "start": 12725, "end": 14454 }
class ____(SingleFiniteDistribution): _argnames = ('n', 'alpha', 'beta') @staticmethod def check(n, alpha, beta): _value_check((n.is_integer, n.is_nonnegative), "'n' must be nonnegative integer. n = %s." % str(n)) _value_check((alpha > 0), "'alpha' must be: alpha > 0 . alpha = %s" % str(alpha)) _value_check((beta > 0), "'beta' must be: beta > 0 . beta = %s" % str(beta)) @property def high(self): return self.n @property def low(self): return S.Zero @property def is_symbolic(self): return not self.n.is_number @property def set(self): if self.is_symbolic: return Intersection(S.Naturals0, Interval(0, self.n)) return set(map(Integer, range(self.n + 1))) def pmf(self, k): n, a, b = self.n, self.alpha, self.beta return binomial(n, k) * beta_fn(k + a, n - k + b) / beta_fn(a, b) def BetaBinomial(name, n, alpha, beta): r""" Create a Finite Random Variable representing a Beta-binomial distribution. Parameters ========== n : Positive Integer Represents number of trials alpha : Real positive number beta : Real positive number Examples ======== >>> from sympy.stats import BetaBinomial, density >>> X = BetaBinomial('X', 2, 1, 1) >>> density(X).dict {0: 1/3, 1: 2*beta(2, 2), 2: 1/3} Returns ======= RandomSymbol References ========== .. [1] https://en.wikipedia.org/wiki/Beta-binomial_distribution .. [2] https://mathworld.wolfram.com/BetaBinomialDistribution.html """ return rv(name, BetaBinomialDistribution, n, alpha, beta)
BetaBinomialDistribution
python
Netflix__metaflow
metaflow/monitor.py
{ "start": 4621, "end": 5323 }
class ____(Metric): def __init__(self, name, env=None): super(Gauge, self).__init__(GAUGE_TYPE, name, env) self._value = 0 def set_value(self, val): self._value = val def increment(self): self._value += 1 @property def value(self): return self._value def serialize(self): parent_ser = super(Gauge, self).serialize() parent_ser["_value"] = self._value return parent_ser @classmethod def deserialize(cls, metric_name, value): g = Gauge(metric_name) g.set_value(value.get("_value", 0)) return g _str_type_to_type = {COUNTER_TYPE: Counter, GAUGE_TYPE: Gauge, TIMER_TYPE: Timer}
Gauge
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/auto_suggest.py
{ "start": 1059, "end": 1331 }
class ____: """ Suggestion returned by an auto-suggest algorithm. :param text: The suggestion text. """ def __init__(self, text: str) -> None: self.text = text def __repr__(self) -> str: return f"Suggestion({self.text})"
Suggestion
python
giampaolo__psutil
psutil/_pslinux.py
{ "start": 24150, "end": 40920 }
class ____: """A wrapper on top of /proc/net/* files, retrieving per-process and system-wide open connections (TCP, UDP, UNIX) similarly to "netstat -an". Note: in case of UNIX sockets we're only able to determine the local endpoint/path, not the one it's connected to. According to [1] it would be possible but not easily. [1] http://serverfault.com/a/417946 """ def __init__(self): # The string represents the basename of the corresponding # /proc/net/{proto_name} file. tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM) tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM) udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM) udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM) unix = ("unix", socket.AF_UNIX, None) self.tmap = { "all": (tcp4, tcp6, udp4, udp6, unix), "tcp": (tcp4, tcp6), "tcp4": (tcp4,), "tcp6": (tcp6,), "udp": (udp4, udp6), "udp4": (udp4,), "udp6": (udp6,), "unix": (unix,), "inet": (tcp4, tcp6, udp4, udp6), "inet4": (tcp4, udp4), "inet6": (tcp6, udp6), } self._procfs_path = None def get_proc_inodes(self, pid): inodes = defaultdict(list) for fd in os.listdir(f"{self._procfs_path}/{pid}/fd"): try: inode = readlink(f"{self._procfs_path}/{pid}/fd/{fd}") except (FileNotFoundError, ProcessLookupError): # ENOENT == file which is gone in the meantime; # os.stat(f"/proc/{self.pid}") will be done later # to force NSP (if it's the case) continue except OSError as err: if err.errno == errno.EINVAL: # not a link continue if err.errno == errno.ENAMETOOLONG: # file name too long debug(err) continue raise else: if inode.startswith('socket:['): # the process is using a socket inode = inode[8:][:-1] inodes[inode].append((pid, int(fd))) return inodes def get_all_inodes(self): inodes = {} for pid in pids(): try: inodes.update(self.get_proc_inodes(pid)) except (FileNotFoundError, ProcessLookupError, PermissionError): # os.listdir() is gonna raise a lot of access denied # exceptions in case of unprivileged user; that's fine # as we'll just end up returning a connection with 
PID # and fd set to None anyway. # Both netstat -an and lsof does the same so it's # unlikely we can do any better. # ENOENT just means a PID disappeared on us. continue return inodes @staticmethod def decode_address(addr, family): """Accept an "ip:port" address as displayed in /proc/net/* and convert it into a human readable form, like: "0500000A:0016" -> ("10.0.0.5", 22) "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521) The IP address portion is a little or big endian four-byte hexadecimal number; that is, the least significant byte is listed first, so we need to reverse the order of the bytes to convert it to an IP address. The port is represented as a two-byte hexadecimal number. Reference: http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html """ ip, port = addr.split(':') port = int(port, 16) # this usually refers to a local socket in listen mode with # no end-points connected if not port: return () ip = ip.encode('ascii') if family == socket.AF_INET: # see: https://github.com/giampaolo/psutil/issues/201 if LITTLE_ENDIAN: ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1]) else: ip = socket.inet_ntop(family, base64.b16decode(ip)) else: # IPv6 ip = base64.b16decode(ip) try: # see: https://github.com/giampaolo/psutil/issues/201 if LITTLE_ENDIAN: ip = socket.inet_ntop( socket.AF_INET6, struct.pack('>4I', *struct.unpack('<4I', ip)), ) else: ip = socket.inet_ntop( socket.AF_INET6, struct.pack('<4I', *struct.unpack('<4I', ip)), ) except ValueError: # see: https://github.com/giampaolo/psutil/issues/623 if not supports_ipv6(): raise _Ipv6UnsupportedError from None raise return ntp.addr(ip, port) @staticmethod def process_inet(file, family, type_, inodes, filter_pid=None): """Parse /proc/net/tcp* and /proc/net/udp* files.""" if file.endswith('6') and not os.path.exists(file): # IPv6 not supported return with open_text(file) as f: f.readline() # skip the first line for lineno, line in enumerate(f, 1): try: _, laddr, raddr, status, 
_, _, _, _, _, inode = ( line.split()[:10] ) except ValueError: msg = ( f"error while parsing {file}; malformed line" f" {lineno} {line!r}" ) raise RuntimeError(msg) from None if inode in inodes: # # We assume inet sockets are unique, so we error # # out if there are multiple references to the # # same inode. We won't do this for UNIX sockets. # if len(inodes[inode]) > 1 and family != socket.AF_UNIX: # raise ValueError("ambiguous inode with multiple " # "PIDs references") pid, fd = inodes[inode][0] else: pid, fd = None, -1 if filter_pid is not None and filter_pid != pid: continue else: if type_ == socket.SOCK_STREAM: status = TCP_STATUSES[status] else: status = _common.CONN_NONE try: laddr = NetConnections.decode_address(laddr, family) raddr = NetConnections.decode_address(raddr, family) except _Ipv6UnsupportedError: continue yield (fd, family, type_, laddr, raddr, status, pid) @staticmethod def process_unix(file, family, inodes, filter_pid=None): """Parse /proc/net/unix files.""" with open_text(file) as f: f.readline() # skip the first line for line in f: tokens = line.split() try: _, _, _, _, type_, _, inode = tokens[0:7] except ValueError: if ' ' not in line: # see: https://github.com/giampaolo/psutil/issues/766 continue msg = ( f"error while parsing {file}; malformed line {line!r}" ) raise RuntimeError(msg) # noqa: B904 if inode in inodes: # noqa: SIM108 # With UNIX sockets we can have a single inode # referencing many file descriptors. 
pairs = inodes[inode] else: pairs = [(None, -1)] for pid, fd in pairs: if filter_pid is not None and filter_pid != pid: continue else: path = tokens[-1] if len(tokens) == 8 else '' type_ = _common.socktype_to_enum(int(type_)) # XXX: determining the remote endpoint of a # UNIX socket on Linux is not possible, see: # https://serverfault.com/questions/252723/ raddr = "" status = _common.CONN_NONE yield (fd, family, type_, path, raddr, status, pid) def retrieve(self, kind, pid=None): self._procfs_path = get_procfs_path() if pid is not None: inodes = self.get_proc_inodes(pid) if not inodes: # no connections for this process return [] else: inodes = self.get_all_inodes() ret = set() for proto_name, family, type_ in self.tmap[kind]: path = f"{self._procfs_path}/net/{proto_name}" if family in {socket.AF_INET, socket.AF_INET6}: ls = self.process_inet( path, family, type_, inodes, filter_pid=pid ) else: ls = self.process_unix(path, family, inodes, filter_pid=pid) for fd, family, type_, laddr, raddr, status, bound_pid in ls: if pid: conn = ntp.pconn(fd, family, type_, laddr, raddr, status) else: conn = ntp.sconn( fd, family, type_, laddr, raddr, status, bound_pid ) ret.add(conn) return list(ret) _net_connections = NetConnections() def net_connections(kind='inet'): """Return system-wide open connections.""" return _net_connections.retrieve(kind) def net_io_counters(): """Return network I/O statistics for every network interface installed on the system as a dict of raw tuples. 
""" with open_text(f"{get_procfs_path()}/net/dev") as f: lines = f.readlines() retdict = {} for line in lines[2:]: colon = line.rfind(':') assert colon > 0, repr(line) name = line[:colon].strip() fields = line[colon + 1 :].strip().split() ( # in bytes_recv, packets_recv, errin, dropin, _fifoin, # unused _framein, # unused _compressedin, # unused _multicastin, # unused # out bytes_sent, packets_sent, errout, dropout, _fifoout, # unused _collisionsout, # unused _carrierout, # unused _compressedout, # unused ) = map(int, fields) retdict[name] = ( bytes_sent, bytes_recv, packets_sent, packets_recv, errin, errout, dropin, dropout, ) return retdict def net_if_stats(): """Get NIC stats (isup, duplex, speed, mtu).""" duplex_map = { cext.DUPLEX_FULL: NIC_DUPLEX_FULL, cext.DUPLEX_HALF: NIC_DUPLEX_HALF, cext.DUPLEX_UNKNOWN: NIC_DUPLEX_UNKNOWN, } names = net_io_counters().keys() ret = {} for name in names: try: mtu = cext.net_if_mtu(name) flags = cext.net_if_flags(name) duplex, speed = cext.net_if_duplex_speed(name) except OSError as err: # https://github.com/giampaolo/psutil/issues/1279 if err.errno != errno.ENODEV: raise debug(err) else: output_flags = ','.join(flags) isup = 'running' in flags ret[name] = ntp.snicstats( isup, duplex_map[duplex], speed, mtu, output_flags ) return ret # ===================================================================== # --- disks # ===================================================================== disk_usage = _psposix.disk_usage def disk_io_counters(perdisk=False): """Return disk I/O statistics for every disk installed on the system as a dict of raw tuples. """ def read_procfs(): # OK, this is a bit confusing. The format of /proc/diskstats can # have 3 variations. 
# On Linux 2.4 each line has always 15 fields, e.g.: # "3 0 8 hda 8 8 8 8 8 8 8 8 8 8 8" # On Linux 2.6+ each line *usually* has 14 fields, and the disk # name is in another position, like this: # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8" # ...unless (Linux 2.6) the line refers to a partition instead # of a disk, in which case the line has less fields (7): # "3 1 hda1 8 8 8 8" # 4.18+ has 4 fields added: # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8 0 0 0 0" # 5.5 has 2 more fields. # See: # https://www.kernel.org/doc/Documentation/iostats.txt # https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats with open_text(f"{get_procfs_path()}/diskstats") as f: lines = f.readlines() for line in lines: fields = line.split() flen = len(fields) # fmt: off if flen == 15: # Linux 2.4 name = fields[3] reads = int(fields[2]) (reads_merged, rbytes, rtime, writes, writes_merged, wbytes, wtime, _, busy_time, _) = map(int, fields[4:14]) elif flen == 14 or flen >= 18: # Linux 2.6+, line referring to a disk name = fields[2] (reads, reads_merged, rbytes, rtime, writes, writes_merged, wbytes, wtime, _, busy_time, _) = map(int, fields[3:14]) elif flen == 7: # Linux 2.6+, line referring to a partition name = fields[2] reads, rbytes, writes, wbytes = map(int, fields[3:]) rtime = wtime = reads_merged = writes_merged = busy_time = 0 else: msg = f"not sure how to interpret line {line!r}" raise ValueError(msg) yield (name, reads, writes, rbytes, wbytes, rtime, wtime, reads_merged, writes_merged, busy_time) # fmt: on def read_sysfs(): for block in os.listdir('/sys/block'): for root, _, files in os.walk(os.path.join('/sys/block', block)): if 'stat' not in files: continue with open_text(os.path.join(root, 'stat')) as f: fields = f.read().strip().split() name = os.path.basename(root) # fmt: off (reads, reads_merged, rbytes, rtime, writes, writes_merged, wbytes, wtime, _, busy_time) = map(int, fields[:10]) yield (name, reads, writes, rbytes, wbytes, rtime, wtime, reads_merged, writes_merged, busy_time) # 
fmt: on if os.path.exists(f"{get_procfs_path()}/diskstats"): gen = read_procfs() elif os.path.exists('/sys/block'): gen = read_sysfs() else: msg = ( f"{get_procfs_path()}/diskstats nor /sys/block are available on" " this system" ) raise NotImplementedError(msg) retdict = {} for entry in gen: # fmt: off (name, reads, writes, rbytes, wbytes, rtime, wtime, reads_merged, writes_merged, busy_time) = entry if not perdisk and not is_storage_device(name): # perdisk=False means we want to calculate totals so we skip # partitions (e.g. 'sda1', 'nvme0n1p1') and only include # base disk devices (e.g. 'sda', 'nvme0n1'). Base disks # include a total of all their partitions + some extra size # of their own: # $ cat /proc/diskstats # 259 0 sda 10485760 ... # 259 1 sda1 5186039 ... # 259 1 sda2 5082039 ... # See: # https://github.com/giampaolo/psutil/pull/1313 continue rbytes *= DISK_SECTOR_SIZE wbytes *= DISK_SECTOR_SIZE retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime, reads_merged, writes_merged, busy_time) # fmt: on return retdict
NetConnections
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 201604, "end": 201947 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("client_mutation_id", "item") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") item = sgqlc.types.Field("ProjectV2Item", graphql_name="item")
AddProjectV2ItemByIdPayload
python
matplotlib__matplotlib
lib/matplotlib/scale.py
{ "start": 14523, "end": 15665 }
class ____(LogScale): """ Provide an arbitrary scale with user-supplied function for the axis and then put on a logarithmic axes. """ name = 'functionlog' @_make_axis_parameter_optional def __init__(self, axis, functions, base=10): """ Parameters ---------- axis : `~matplotlib.axis.Axis` The axis for the scale. functions : (callable, callable) two-tuple of the forward and inverse functions for the scale. The forward function must be monotonic. Both functions must have the signature:: def forward(values: array-like) -> array-like base : float, default: 10 Logarithmic base of the scale. """ forward, inverse = functions self.subs = None self._transform = FuncTransform(forward, inverse) + LogTransform(base) @property def base(self): return self._transform._b.base # Base of the LogTransform. def get_transform(self): """Return the `.Transform` associated with this scale.""" return self._transform
FuncScaleLog
python
numba__llvmlite
llvmlite/binding/newpassmanagers.py
{ "start": 624, "end": 1647 }
class ____(_prunestats): """ Holds statistics from reference count pruning. """ def __add__(self, other): if not isinstance(other, PruneStats): msg = 'PruneStats can only be added to another PruneStats, got {}.' raise TypeError(msg.format(type(other))) return PruneStats(self.basicblock + other.basicblock, self.diamond + other.diamond, self.fanout + other.fanout, self.fanout_raise + other.fanout_raise) def __sub__(self, other): if not isinstance(other, PruneStats): msg = ('PruneStats can only be subtracted from another PruneStats, ' 'got {}.') raise TypeError(msg.format(type(other))) return PruneStats(self.basicblock - other.basicblock, self.diamond - other.diamond, self.fanout - other.fanout, self.fanout_raise - other.fanout_raise)
PruneStats
python
PyCQA__pylint
tests/functional/u/use/use_literal_dict.py
{ "start": 533, "end": 868 }
class ____: prop: dict = {"a": 1} inst = SomeClass() dict( # [use-dict-literal] url="/foo", **inst.prop, ) dict( # [use-dict-literal] Lorem="ipsum", dolor="sit", amet="consectetur", adipiscing="elit", sed="do", eiusmod="tempor", incididunt="ut", labore="et", dolore="magna", )
SomeClass
python
bokeh__bokeh
src/bokeh/models/widgets/groups.py
{ "start": 1658, "end": 2007 }
class ____(Widget): ''' Abstract base class for all kinds of groups. ''' # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) labels = List(String, help=""" List of text labels contained in this group. """) @abstract
AbstractGroup
python
celery__celery
celery/utils/time.py
{ "start": 13282, "end": 16112 }
class ____: """Version of ``dateutil.relativedelta`` that only supports addition.""" def __init__(self, year=None, month=None, weeks=0, weekday=None, day=None, hour=None, minute=None, second=None, microsecond=None, **kwargs: Any): # pylint: disable=redefined-outer-name # weekday is also a function in outer scope. self.year = year self.month = month self.weeks = weeks self.weekday = weekday self.day = day self.hour = hour self.minute = minute self.second = second self.microsecond = microsecond self.days = weeks * 7 self._has_time = self.hour is not None or self.minute is not None def __repr__(self) -> str: return reprcall('ffwd', (), self._fields(weeks=self.weeks, weekday=self.weekday)) def __radd__(self, other: Any) -> timedelta: if not isinstance(other, date): return NotImplemented year = self.year or other.year month = self.month or other.month day = min(monthrange(year, month)[1], self.day or other.day) ret = other.replace(**dict(dictfilter(self._fields()), year=year, month=month, day=day)) if self.weekday is not None: ret += timedelta(days=(7 - ret.weekday() + self.weekday) % 7) return ret + timedelta(days=self.days) def _fields(self, **extra: Any) -> dict[str, Any]: return dictfilter({ 'year': self.year, 'month': self.month, 'day': self.day, 'hour': self.hour, 'minute': self.minute, 'second': self.second, 'microsecond': self.microsecond, }, **extra) def utcoffset( time: ModuleType = _time, localtime: Callable[..., _time.struct_time] = _time.localtime) -> float: """Return the current offset to UTC in hours.""" if localtime().tm_isdst: return time.altzone // 3600 return time.timezone // 3600 def adjust_timestamp(ts: float, offset: int, here: Callable[..., float] = utcoffset) -> float: """Adjust timestamp based on provided utcoffset.""" return ts - (offset - here()) * 3600 def get_exponential_backoff_interval( factor: int, retries: int, maximum: int, full_jitter: bool = False ) -> int: """Calculate the exponential backoff wait time.""" # Will be zero if factor 
equals 0 countdown = min(maximum, factor * (2 ** retries)) # Full jitter according to # https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ if full_jitter: countdown = random.randrange(countdown + 1) # Adjust according to maximum wait time and account for negative values. return max(0, countdown)
ffwd