language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
huggingface__transformers
src/transformers/convert_slow_tokenizer.py
{ "start": 34813, "end": 36216 }
class ____(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): list_normalizers = [ normalizers.Replace("``", '"'), normalizers.Replace("''", '"'), ] if not self.original_tokenizer.keep_accents: list_normalizers.append(normalizers.NFKD()) list_normalizers.append(normalizers.StripAccents()) if self.original_tokenizer.do_lower_case: list_normalizers.append(normalizers.Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap if precompiled_charsmap: list_normalizers.append(normalizers.Precompiled(precompiled_charsmap)) list_normalizers.append(normalizers.Replace(Regex(" {2,}"), " ")) return normalizers.Sequence(list_normalizers) def post_processor(self): return processors.TemplateProcessing( single="$A:0 <sep>:0 <cls>:2", pair="$A:0 <sep>:0 $B:1 <sep>:1 <cls>:2", special_tokens=[ ("<sep>", self.original_tokenizer.convert_tokens_to_ids("<sep>")), ("<cls>", self.original_tokenizer.convert_tokens_to_ids("<cls>")), ], )
XLNetConverter
python
pytorch__pytorch
torchgen/model.py
{ "start": 75970, "end": 77779 }
class ____: @staticmethod def parse(t: str) -> Type: r = Type._parse(t) assert str(r) == t, f"{r} != {t}" return r @staticmethod def _parse(t: str) -> Type: m = re.match(r"^(.+)\?$", t) if m is not None: return OptionalType(Type.parse(m.group(1))) m = re.match(r"^(.+)\[([0-9]+)?\]$", t) if m is not None: size = int(m.group(2)) if m.group(2) is not None else None return ListType(elem=Type.parse(m.group(1)), size=size) # '__torch__.torch.classes.' is the prefix for custom class m = re.match(r"^__torch__\.torch\.classes\.([a-zA-Z0-9_.]+)$", t) if m is not None: return CustomClassType(m.group(1)) try: return BaseType(BaseTy[t]) except KeyError as e: raise RuntimeError(f"unrecognized type {t}") from e def __str__(self) -> str: raise NotImplementedError # WARNING: These concepts are not very well-defined. For example, # is "int?" nullable? How about "int?[]". They are defined # so we can conveniently generate legacy Declarations.yaml but # really we should probably just remove these at some point def is_base_ty_like(self, base_ty: BaseTy) -> bool: raise NotImplementedError def is_tensor_like(self) -> bool: return self.is_base_ty_like(BaseTy.Tensor) def is_generator_like(self) -> bool: return self.is_base_ty_like(BaseTy.Generator) def is_symint_like(self) -> bool: return self.is_base_ty_like(BaseTy.SymInt) def is_nullable(self) -> bool: raise NotImplementedError def is_list_like(self) -> ListType | None: raise NotImplementedError # Base types are simple, atomic types with no further structure
Type
python
apache__airflow
providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/asb.py
{ "start": 21691, "end": 32806 }
class ____(BaseAzureServiceBusHook): """ Interact with ServiceBusClient. This acts as a high level interface for getting ServiceBusSender and ServiceBusReceiver. """ def get_conn(self) -> ServiceBusClient: """Create and returns ServiceBusClient by using the connection string in connection details.""" conn = self.get_connection(self.conn_id) connection_string: str = str(conn.schema) if connection_string: client = ServiceBusClient.from_connection_string(connection_string, logging_enable=True) else: extras = conn.extra_dejson credential: str | DefaultAzureCredential = self._get_field(extras=extras, field_name="credential") fully_qualified_namespace = self._get_field(extras=extras, field_name="fully_qualified_namespace") if not credential: managed_identity_client_id = self._get_field( extras=extras, field_name="managed_identity_client_id" ) workload_identity_tenant_id = self._get_field( extras=extras, field_name="workload_identity_tenant_id" ) credential = get_sync_default_azure_credential( managed_identity_client_id=managed_identity_client_id, workload_identity_tenant_id=workload_identity_tenant_id, ) client = ServiceBusClient( fully_qualified_namespace=fully_qualified_namespace, credential=credential, # type: ignore[arg-type] ) self.log.info("Create and returns ServiceBusClient") return client def send_message( self, queue_name: str, messages: str | list[str], batch_message_flag: bool = False, message_id: str | None = None, reply_to: str | None = None, message_headers: dict[str | bytes, int | float | bytes | bool | str | UUID] | None = None, ): """ Use ServiceBusClient Send to send message(s) to a Service Bus Queue. By using ``batch_message_flag``, it enables and send message as batch message. :param queue_name: The name of the queue or a QueueProperties with name. :param messages: Message which needs to be sent to the queue. It can be string or list of string. :param batch_message_flag: bool flag, can be set to True if message needs to be sent as batch message. 
:param message_id: Message ID to set on message being sent to the queue. Please note, message_id may only be set when a single message is sent. :param reply_to: Reply to which needs to be sent to the queue. :param message_headers: Headers to add to the message's application_properties field for Azure Service Bus. """ if queue_name is None: raise TypeError("Queue name cannot be None.") if not messages: raise ValueError("Messages list cannot be empty.") if message_id and isinstance(messages, list) and len(messages) != 1: raise TypeError("Message ID can only be set if a single message is sent.") with ( self.get_conn() as service_bus_client, service_bus_client.get_queue_sender(queue_name=queue_name) as sender, sender, ): message_creator = lambda msg_body: ServiceBusMessage( msg_body, message_id=message_id, reply_to=reply_to, application_properties=message_headers ) message_list = [messages] if isinstance(messages, str) else messages if not batch_message_flag: self.send_list_messages(sender, message_list, message_creator) else: self.send_batch_message(sender, message_list, message_creator) @staticmethod def send_list_messages( sender: ServiceBusSender, messages: list[str], message_creator: Callable[[str], ServiceBusMessage], ): list_messages = [message_creator(body) for body in messages] sender.send_messages(list_messages) @staticmethod def send_batch_message( sender: ServiceBusSender, messages: list[str], message_creator: Callable[[str], ServiceBusMessage], ): batch_message = sender.create_message_batch() for message in messages: batch_message.add_message(message_creator(message)) sender.send_messages(batch_message) def receive_message( self, queue_name: str, context: Context, max_message_count: int | None = 1, max_wait_time: float | None = None, message_callback: MessageCallback | None = None, ): """ Receive a batch of messages at once in a specified Queue name. :param queue_name: The name of the queue name or a QueueProperties with name. 
:param max_message_count: Maximum number of messages in the batch. :param max_wait_time: Maximum time to wait in seconds for the first message to arrive. :param message_callback: Optional callback to process each message. If not provided, then the message will be logged and completed. If provided, and throws an exception, the message will be abandoned for future redelivery. """ if queue_name is None: raise TypeError("Queue name cannot be None.") with ( self.get_conn() as service_bus_client, service_bus_client.get_queue_receiver(queue_name=queue_name) as receiver, receiver, ): received_msgs = receiver.receive_messages( max_message_count=max_message_count, max_wait_time=max_wait_time ) for msg in received_msgs: self._process_message(msg, context, message_callback, receiver) def receive_subscription_message( self, topic_name: str, subscription_name: str, context: Context, max_message_count: int | None, max_wait_time: float | None, message_callback: MessageCallback | None = None, ): """ Receive a batch of subscription message at once. This approach is optimal if you wish to process multiple messages simultaneously, or perform an ad-hoc receive as a single call. :param subscription_name: The subscription name that will own the rule in topic :param topic_name: The topic that will own the subscription rule. :param max_message_count: Maximum number of messages in the batch. Actual number returned will depend on prefetch_count and incoming stream rate. Setting to None will fully depend on the prefetch config. The default value is 1. :param max_wait_time: Maximum time to wait in seconds for the first message to arrive. If no messages arrive, and no timeout is specified, this call will not return until the connection is closed. If specified, an no messages arrive within the timeout period, an empty list will be returned. 
""" if subscription_name is None: raise TypeError("Subscription name cannot be None.") if topic_name is None: raise TypeError("Topic name cannot be None.") with ( self.get_conn() as service_bus_client, service_bus_client.get_subscription_receiver( topic_name, subscription_name ) as subscription_receiver, subscription_receiver, ): received_msgs = subscription_receiver.receive_messages( max_message_count=max_message_count, max_wait_time=max_wait_time ) for msg in received_msgs: self._process_message(msg, context, message_callback, subscription_receiver) def read_message( self, queue_name: str, max_wait_time: float | None = None, ) -> ServiceBusReceivedMessage | None: """ Read a single message from a Service Bus queue without callback processing. :param queue_name: The name of the queue to read from. :param max_wait_time: Maximum time to wait for messages (seconds). :return: The received message or None if no message is available. """ with ( self.get_conn() as service_bus_client, service_bus_client.get_queue_receiver(queue_name=queue_name) as receiver, receiver, ): received_msgs = receiver.receive_messages(max_message_count=1, max_wait_time=max_wait_time) if received_msgs: msg = received_msgs[0] receiver.complete_message(msg) return msg return None def read_subscription_message( self, topic_name: str, subscription_name: str, max_wait_time: float | None = None, ) -> ServiceBusReceivedMessage | None: """ Read a single message from a Service Bus topic subscription without callback processing. :param topic_name: The name of the topic. :param subscription_name: The name of the subscription. :param max_wait_time: Maximum time to wait for messages (seconds). :return: The received message or None if no message is available. 
""" with ( self.get_conn() as service_bus_client, service_bus_client.get_subscription_receiver( topic_name, subscription_name ) as subscription_receiver, subscription_receiver, ): received_msgs = subscription_receiver.receive_messages( max_message_count=1, max_wait_time=max_wait_time ) if received_msgs: msg = received_msgs[0] subscription_receiver.complete_message(msg) return msg return None def _process_message( self, msg: ServiceBusReceivedMessage, context: Context, message_callback: MessageCallback | None, receiver: ServiceBusReceiver, ): """ Process the message by calling the message_callback or logging the message. :param msg: The message to process. :param message_callback: Optional callback to process each message. If not provided, then the message will be logged and completed. If provided, and throws an exception, the message will be abandoned for future redelivery. :param receiver: The receiver that received the message. """ if message_callback is None: self.log.info(msg) receiver.complete_message(msg) else: try: message_callback(msg, context) except Exception as e: self.log.error("Error processing message: %s", e) receiver.abandon_message(msg) raise e else: receiver.complete_message(msg)
MessageHook
python
doocs__leetcode
solution/0600-0699/0655.Print Binary Tree/Solution.py
{ "start": 192, "end": 820 }
class ____: def printTree(self, root: Optional[TreeNode]) -> List[List[str]]: def height(root): if root is None: return -1 return 1 + max(height(root.left), height(root.right)) def dfs(root, r, c): if root is None: return ans[r][c] = str(root.val) dfs(root.left, r + 1, c - 2 ** (h - r - 1)) dfs(root.right, r + 1, c + 2 ** (h - r - 1)) h = height(root) m, n = h + 1, 2 ** (h + 1) - 1 ans = [[""] * n for _ in range(m)] dfs(root, 0, (n - 1) // 2) return ans
Solution
python
django-debug-toolbar__django-debug-toolbar
tests/panels/test_redirects.py
{ "start": 284, "end": 4822 }
class ____(BaseTestCase): panel_id = RedirectsPanel.panel_id def setUp(self): # Suppress the deprecation warning during setup with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) super().setUp() def test_regular_response(self): not_redirect = HttpResponse() self._get_response = lambda request: not_redirect response = self.panel.process_request(self.request) self.assertTrue(response is not_redirect) def test_not_a_redirect(self): redirect = HttpResponse(status=304) self._get_response = lambda request: redirect response = self.panel.process_request(self.request) self.assertTrue(response is redirect) def test_redirect(self): redirect = HttpResponse(status=302) redirect["Location"] = "http://somewhere/else/" self._get_response = lambda request: redirect response = self.panel.process_request(self.request) self.assertFalse(response is redirect) self.assertContains(response, "302 Found") self.assertContains(response, "http://somewhere/else/") def test_redirect_with_broken_context_processor(self): TEMPLATES = copy.deepcopy(settings.TEMPLATES) TEMPLATES[1]["OPTIONS"]["context_processors"] = [ "tests.context_processors.broken" ] with self.settings(TEMPLATES=TEMPLATES): redirect = HttpResponse(status=302) redirect["Location"] = "http://somewhere/else/" self._get_response = lambda request: redirect response = self.panel.process_request(self.request) self.assertFalse(response is redirect) self.assertContains(response, "302 Found") self.assertContains(response, "http://somewhere/else/") def test_unknown_status_code(self): redirect = HttpResponse(status=369) redirect["Location"] = "http://somewhere/else/" self._get_response = lambda request: redirect response = self.panel.process_request(self.request) self.assertContains(response, "369 Unknown Status Code") def test_unknown_status_code_with_reason(self): redirect = HttpResponse(status=369, reason="Look Ma!") redirect["Location"] = "http://somewhere/else/" self._get_response = lambda request: 
redirect response = self.panel.process_request(self.request) self.assertContains(response, "369 Look Ma!") def test_insert_content(self): """ Test that the panel only inserts content after generate_stats and not the process_request. """ redirect = HttpResponse(status=304) self._get_response = lambda request: redirect response = self.panel.process_request(self.request) self.assertIsNotNone(response) response = self.panel.generate_stats(self.request, redirect) self.assertIsNone(response) async def test_async_compatibility(self): redirect = HttpResponse(status=302) async def get_response(request): return redirect await_response = await get_response(self.request) self._get_response = get_response self.request = AsyncRequestFactory().get("/") response = await self.panel.process_request(self.request) self.assertIsInstance(response, HttpResponse) self.assertTrue(response is await_response) def test_original_response_preserved(self): redirect = HttpResponse(status=302) redirect["Location"] = "http://somewhere/else/" self._get_response = lambda request: redirect response = self.panel.process_request(self.request) self.assertFalse(response is redirect) self.assertTrue(hasattr(response, "original_response")) self.assertTrue(response.original_response is redirect) self.assertIsNone(response.get("Location")) self.assertEqual( response.original_response.get("Location"), "http://somewhere/else/" ) def test_deprecation_warning(self): """Test that a deprecation warning is shown when RedirectsPanel is instantiated.""" with self.assertWarns(DeprecationWarning) as cm: toolbar = DebugToolbar(self.request, self._get_response) toolbar.get_panel_by_id(RedirectsPanel.panel_id) self.assertIn("RedirectsPanel is deprecated", str(cm.warning)) self.assertIn("HistoryPanel", str(cm.warning))
RedirectsPanelTestCase
python
python-attrs__attrs
tests/test_filters.py
{ "start": 231, "end": 573 }
class ____: """ Tests for `_split_what`. """ def test_splits(self): """ Splits correctly. """ assert ( frozenset((int, str)), frozenset(("abcd", "123")), frozenset((fields(C).a,)), ) == _split_what((str, "123", fields(C).a, int, "abcd"))
TestSplitWhat
python
django__django
tests/fixtures_regress/models.py
{ "start": 8639, "end": 8809 }
class ____(BaseNKModel): a = models.ForeignKey(M2MComplexCircular2A, models.CASCADE) b = models.ForeignKey(M2MComplexCircular2B, models.CASCADE)
M2MCircular2ThroughAB
python
spyder-ide__spyder
spyder/plugins/maininterpreter/confpage.py
{ "start": 760, "end": 10137 }
class ____(PluginConfigPage): def __init__(self, plugin, parent): super().__init__(plugin, parent) self.apply_callback = self.perform_adjustments self.plugin_container = plugin.get_container() self.cus_exec_radio = None self.pyexec_edit = None self.cus_exec_combo = None self.conda_edit = None conda_env = get_list_conda_envs_cache() pyenv_env = get_list_pyenv_envs_cache() envs = {**conda_env, **pyenv_env} valid_custom_list = self.get_option('custom_interpreters_list') for env in envs.keys(): path, _ = envs[env] if path not in valid_custom_list: valid_custom_list.append(path) self.set_option('custom_interpreters_list', valid_custom_list) # add custom_interpreter to executable selection executable = self.get_option('executable') # check if the executable is valid - use Spyder's if not if self.get_option('default') or not osp.isfile(executable): executable = get_python_executable() elif not self.get_option('custom_interpreter'): self.set_option('custom_interpreter', ' ') self.plugin_container.add_to_custom_interpreters(executable) self.plugin_container.validate_custom_interpreters_list() def initialize(self): super().initialize() def setup_page(self): newcb = self.create_checkbox # Python executable Group pyexec_group = QGroupBox(_("Python interpreter")) pyexec_bg = QButtonGroup(pyexec_group) pyexec_label = QLabel( _( "Select the default Python interpreter for new IPython consoles " "and Editor code completion" ) ) pyexec_label.setWordWrap(True) self.def_exec_radio = self.create_radiobutton( _("Internal (same used by Spyder)"), 'default', button_group=pyexec_bg, ) self.cus_exec_radio = self.create_radiobutton( _("Selected interpreter:"), 'custom', button_group=pyexec_bg, ) if os.name == 'nt': filters = _("Executables") + " (*.exe)" else: filters = None pyexec_layout = QVBoxLayout() pyexec_layout.addWidget(pyexec_label) pyexec_layout.addWidget(self.def_exec_radio) pyexec_layout.addWidget(self.cus_exec_radio) self.plugin_container.validate_custom_interpreters_list() 
self.cus_exec_combo = self.create_file_combobox( _('Recent custom interpreters'), self.get_option('custom_interpreters_list'), 'custom_interpreter', filters=filters, default_line_edit=True, adjust_to_contents=True, validate_callback=programs.is_python_interpreter, ) self.cus_exec_combo.setStyleSheet("margin-left: 3px") self.cus_exec_combo.combobox.setMinimumWidth(400) self.def_exec_radio.radiobutton.toggled.connect( self.cus_exec_combo.setDisabled) self.cus_exec_radio.radiobutton.toggled.connect( self.cus_exec_combo.setEnabled) pyexec_layout.addWidget(self.cus_exec_combo) pyexec_group.setLayout(pyexec_layout) self.pyexec_edit = self.cus_exec_combo.combobox.lineEdit() # Conda executable path conda_group = QGroupBox(_("Conda executable")) conda_layout = QVBoxLayout() custom_conda_check = self.create_checkbox( _("Use a custom Conda/Mamba/Micromamba executable"), "custom_conda", tip=_( "Use the specified Conda, Mamba or Micromamba instead of " "finding the executable from the interpreter path.<br><br>" "Required if using a custom Conda prefix with a Conda/Mamba " "installed at a non-standard location." ), ) conda_layout.addWidget(custom_conda_check) conda_path = self.create_browsefile( "", 'conda_path', filters='*.exe', validate_callback=validate_conda, validate_reason=_( "The selected file is not a valid Conda executable" ), ) conda_path.setStyleSheet("margin-left: 3px") conda_path.textbox.setMinimumWidth(400) conda_layout.addWidget(conda_path) conda_group.setLayout(conda_layout) conda_path.setEnabled( self.get_option('custom_conda') ) custom_conda_check.checkbox.toggled.connect(conda_path.setEnabled) self.conda_edit = conda_path.textbox # UMR Group umr_group = QGroupBox(_("User Module Reloader (UMR)")) umr_label = QLabel( _( "UMR forces Python to reload imported modules when " "running a file in an IPython console." 
), ) umr_label.setWordWrap(True) umr_enabled_box = newcb( _("Enable UMR"), 'umr/enabled', msg_info=_("This change will only be applied to new consoles"), tip=_( "This option will enable the User Module Reloader (UMR) " "in IPython consoles. UMR forces Python to perform a " "deep reload of imported modules when running Python files " "with Spyder's <code>Run</code> command.<br><br>" "After being enabled, UMR requires a console restart " "(otherwise only newly imported modules will be " "reloaded when running files).<br><br>" "If errors occur when re-running a PyQt-based program, " "please check that the Qt objects are properly destroyed " "(e.g. you may have to use the attribute " "<code>Qt.WA_DeleteOnClose</code> on your main window, " "using the <code>setAttribute</code> method)." ), ) umr_verbose_box = newcb( _("Print list of reloaded modules"), 'umr/verbose', msg_info=_("This change will only be applied to new consoles"), ) umr_namelist_btn = QPushButton( _("Select modules to exclude from being reloaded")) umr_namelist_btn.clicked.connect(self.set_umr_namelist) umr_layout = QVBoxLayout() umr_layout.addWidget(umr_label) umr_layout.addWidget(umr_enabled_box) umr_layout.addWidget(umr_verbose_box) umr_layout.addWidget(umr_namelist_btn) umr_group.setLayout(umr_layout) # Layout vlayout = QVBoxLayout() vlayout.addWidget(pyexec_group) vlayout.addWidget(conda_group) vlayout.addWidget(umr_group) vlayout.addStretch(1) self.setLayout(vlayout) def set_umr_namelist(self): """Set UMR excluded module names list.""" example_excludes = "<code>numpy, scipy</code>" arguments, valid = QInputDialog.getText( self, _('UMR'), _("List of excluded modules (e.g. 
{})").format(example_excludes), QLineEdit.Normal, ", ".join(self.get_option('umr/namelist')), ) if valid: arguments = str(arguments) if arguments: namelist = arguments.replace(' ', '').split(',') fixed_namelist = [] non_ascii_namelist = [] for module_name in namelist: if programs.is_module_installed(module_name): fixed_namelist.append(module_name) invalid = ", ".join(set(namelist) - set(fixed_namelist) - set(non_ascii_namelist)) if invalid: QMessageBox.warning( self, _('UMR'), _( "The following modules are not installed:\n{}" ).format(invalid), QMessageBox.Ok, ) QMessageBox.information( self, _('UMR'), _("Changes will only be applied to new IPython consoles"), QMessageBox.Ok, ) else: fixed_namelist = [] self.set_option('umr/namelist', fixed_namelist) def perform_adjustments(self): """Perform some adjustments to the page after applying preferences.""" if not self.def_exec_radio.radiobutton.isChecked(): # Get current executable executable = self.pyexec_edit.text() executable = osp.normpath(executable) if executable.endswith('pythonw.exe'): executable = executable.replace("pythonw.exe", "python.exe") # Update combobox items. custom_list = self.cus_exec_combo.combobox.choices if executable not in custom_list: custom_list = custom_list + [executable] self.cus_exec_combo.combobox.clear() self.cus_exec_combo.combobox.addItems(custom_list) self.pyexec_edit.setText(executable)
MainInterpreterConfigPage
python
matplotlib__matplotlib
lib/matplotlib/backends/backend_wx.py
{ "start": 50403, "end": 50650 }
class ____(backend_tools.ToolHelpBase): def trigger(self, *args): _HelpDialog.show(self.figure.canvas.GetTopLevelParent(), self._get_help_entries()) @backend_tools._register_tool_class(_FigureCanvasWxBase)
HelpWx
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/sensors/datafusion.py
{ "start": 1346, "end": 5624 }
class ____(BaseSensorOperator): """ Check the status of the pipeline in the Google Cloud Data Fusion. :param pipeline_name: Your pipeline name. :param pipeline_id: Your pipeline ID. :param expected_statuses: State that is expected :param failure_statuses: State that will terminate the sensor with an exception :param instance_name: The name of the instance. :param location: The Cloud Data Fusion location in which to handle the request. :param project_id: The ID of the Google Cloud project that the instance belongs to. :param namespace: If your pipeline belongs to a Basic edition instance, the namespace ID is always default. If your pipeline belongs to an Enterprise edition instance, you can create a namespace. :param gcp_conn_id: The connection ID to use when fetching connection info. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). 
""" template_fields: Sequence[str] = ("pipeline_id",) def __init__( self, pipeline_name: str, pipeline_id: str, expected_statuses: Iterable[str], instance_name: str, location: str, failure_statuses: Iterable[str] | None = None, project_id: str = PROVIDE_PROJECT_ID, namespace: str = "default", gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.pipeline_name = pipeline_name self.pipeline_id = pipeline_id self.expected_statuses = expected_statuses self.failure_statuses = failure_statuses self.instance_name = instance_name self.location = location self.project_id = project_id self.namespace = namespace self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def poke(self, context: Context) -> bool: self.log.info( "Waiting for pipeline %s to be in one of the states: %s.", self.pipeline_id, ", ".join(self.expected_statuses), ) hook = DataFusionHook( gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain, ) instance = hook.get_instance( instance_name=self.instance_name, location=self.location, project_id=self.project_id, ) api_url = instance["apiEndpoint"] pipeline_status = None try: pipeline_workflow = hook.get_pipeline_workflow( pipeline_name=self.pipeline_name, instance_url=api_url, pipeline_id=self.pipeline_id, namespace=self.namespace, ) pipeline_status = pipeline_workflow.get("status") except AirflowNotFoundException: message = "Specified Pipeline ID was not found." raise AirflowException(message) except AirflowException: pass # Because the pipeline may not be visible in system yet if pipeline_status is not None: if self.failure_statuses and pipeline_status in self.failure_statuses: message = ( f"Pipeline with id '{self.pipeline_id}' state is: {pipeline_status}. " f"Terminating sensor..." 
) raise AirflowException(message) self.log.debug( "Current status of the pipeline workflow for %s: %s.", self.pipeline_id, pipeline_status ) return pipeline_status is not None and pipeline_status in self.expected_statuses
CloudDataFusionPipelineStateSensor
python
huggingface__transformers
src/transformers/models/sam3_tracker/modular_sam3_tracker.py
{ "start": 5610, "end": 5668 }
class ____(Sam2MaskDecoder): pass
Sam3TrackerMaskDecoder
python
getsentry__sentry
src/sentry/incidents/models/incident.py
{ "start": 8391, "end": 8501 }
class ____(Enum): CREATED = 1 STATUS_CHANGE = 2 DETECTED = 4 @region_silo_model
IncidentActivityType
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/direct_mpich/package.py
{ "start": 217, "end": 429 }
class ____(Package): homepage = "http://www.example.com" url = "http://www.example.com/direct_mpich-1.0.tar.gz" version("1.0", md5="0123456789abcdef0123456789abcdef") depends_on("mpich")
DirectMpich
python
aio-libs__aiohttp
aiohttp/web_exceptions.py
{ "start": 6719, "end": 6782 }
class ____(HTTPMove): status_code = 307
HTTPTemporaryRedirect
python
spack__spack
lib/spack/spack/solver/reuse.py
{ "start": 8638, "end": 14848 }
class ____: """Selects specs that can be reused during concretization.""" def __init__(self, configuration: spack.config.Configuration) -> None: self.configuration = configuration self.store = spack.store.create(configuration) self.reuse_strategy = ReuseStrategy.ROOTS reuse_yaml = self.configuration.get("concretizer:reuse", False) self.reuse_sources = [] if not isinstance(reuse_yaml, Mapping): self.reuse_sources.append( SpecFilter.from_packages_yaml(configuration, include=[], exclude=[]) ) if reuse_yaml is False: self.reuse_strategy = ReuseStrategy.NONE return if reuse_yaml == "dependencies": self.reuse_strategy = ReuseStrategy.DEPENDENCIES self.reuse_sources.extend( [ SpecFilter.from_store( configuration=self.configuration, include=[], exclude=[] ), SpecFilter.from_buildcache( configuration=self.configuration, include=[], exclude=[] ), SpecFilter.from_environment( configuration=self.configuration, include=[], exclude=[], env=spack.environment.active_environment(), # with all concrete includes ), ] ) else: has_external_source = False roots = reuse_yaml.get("roots", True) if roots is True: self.reuse_strategy = ReuseStrategy.ROOTS else: self.reuse_strategy = ReuseStrategy.DEPENDENCIES default_include = reuse_yaml.get("include", []) default_exclude = reuse_yaml.get("exclude", []) default_sources = [{"type": "local"}, {"type": "buildcache"}] for source in reuse_yaml.get("from", default_sources): include = source.get("include", default_include) exclude = source.get("exclude", default_exclude) if source["type"] == "environment" and "path" in source: env_dir = spack.environment.as_env_dir(source["path"]) active_env = spack.environment.active_environment() if active_env and env_dir in active_env.included_concrete_envs: # If the environment is included as a concrete environment, use the # local copy of specs in the active environment. # note: included concrete environments are only updated at concretization # time, and reuse needs to match the included specs. 
self.reuse_sources.append( SpecFilter.from_environment_included_concrete( self.configuration, include=include, exclude=exclude, env=active_env, included_concrete=env_dir, ) ) else: # If the environment is not included as a concrete environment, use the # current specs from its lockfile. self.reuse_sources.append( SpecFilter.from_environment( self.configuration, include=include, exclude=exclude, env=spack.environment.environment_from_name_or_dir(env_dir), ) ) elif source["type"] == "environment": # reusing from the current environment implicitly reuses from all of the # included concrete environments self.reuse_sources.append( SpecFilter.from_environment( self.configuration, include=include, exclude=exclude, env=spack.environment.active_environment(), ) ) elif source["type"] == "local": self.reuse_sources.append( SpecFilter.from_store(self.configuration, include=include, exclude=exclude) ) elif source["type"] == "buildcache": self.reuse_sources.append( SpecFilter.from_buildcache( self.configuration, include=include, exclude=exclude ) ) elif source["type"] == "external": has_external_source = True if include: # Since libcs are implicit externals, we need to implicitly include them include = include + sorted(all_libcs()) # type: ignore[type-var] self.reuse_sources.append( SpecFilter.from_packages_yaml( configuration, include=include, exclude=exclude ) ) # If "external" is not specified, we assume that all externals have to be included if not has_external_source: self.reuse_sources.append( SpecFilter.from_packages_yaml(configuration, include=[], exclude=[]) ) def reusable_specs(self, specs: List[spack.spec.Spec]) -> List[spack.spec.Spec]: result = [] for reuse_source in self.reuse_sources: result.extend(reuse_source.selected_specs()) # If we only want to reuse dependencies, remove the root specs if self.reuse_strategy == ReuseStrategy.DEPENDENCIES: result = [spec for spec in result if not any(root in spec for root in specs)] return result
ReusableSpecsSelector
python
ansible__ansible
test/lib/ansible_test/_internal/locale_util.py
{ "start": 612, "end": 2161 }
class ____(SystemExit): """Exception to raise when locale related errors occur.""" def __init__(self, message: str) -> None: super().__init__(f'ERROR: {message}') def configure_locale() -> tuple[str, t.Optional[str]]: """Configure the locale, returning the selected locale and an optional warning.""" if (fs_encoding := sys.getfilesystemencoding()).lower() != 'utf-8': raise LocaleError(f'ansible-test requires the filesystem encoding to be UTF-8, but "{fs_encoding}" was detected.') candidate_locales = STANDARD_LOCALE, FALLBACK_LOCALE errors: dict[str, str] = {} warning: t.Optional[str] = None configured_locale: t.Optional[str] = None for candidate_locale in candidate_locales: try: locale.setlocale(locale.LC_ALL, candidate_locale) locale.getlocale() except (locale.Error, ValueError) as ex: errors[candidate_locale] = str(ex) else: configured_locale = candidate_locale break if not configured_locale: raise LocaleError('ansible-test could not initialize a supported locale:\n' + '\n'.join(f'{key}: {value}' for key, value in errors.items())) if configured_locale != STANDARD_LOCALE: warning = (f'Using locale "{configured_locale}" instead of "{STANDARD_LOCALE}". ' 'Tests which depend on the locale may behave unexpectedly.') return configured_locale, warning CONFIGURED_LOCALE, LOCALE_WARNING = configure_locale()
LocaleError
python
wandb__wandb
wandb/vendor/pygments/lexers/objective.py
{ "start": 11249, "end": 22764 }
class ____(RegexLexer): """ For `Swift <https://developer.apple.com/swift/>`_ source. .. versionadded:: 2.0 """ name = 'Swift' filenames = ['*.swift'] aliases = ['swift'] mimetypes = ['text/x-swift'] tokens = { 'root': [ # Whitespace and Comments (r'\n', Text), (r'\s+', Text), (r'//', Comment.Single, 'comment-single'), (r'/\*', Comment.Multiline, 'comment-multi'), (r'#(if|elseif|else|endif|available)\b', Comment.Preproc, 'preproc'), # Keywords include('keywords'), # Global Types (words(( 'Array', 'AutoreleasingUnsafeMutablePointer', 'BidirectionalReverseView', 'Bit', 'Bool', 'CFunctionPointer', 'COpaquePointer', 'CVaListPointer', 'Character', 'ClosedInterval', 'CollectionOfOne', 'ContiguousArray', 'Dictionary', 'DictionaryGenerator', 'DictionaryIndex', 'Double', 'EmptyCollection', 'EmptyGenerator', 'EnumerateGenerator', 'EnumerateSequence', 'FilterCollectionView', 'FilterCollectionViewIndex', 'FilterGenerator', 'FilterSequenceView', 'Float', 'Float80', 'FloatingPointClassification', 'GeneratorOf', 'GeneratorOfOne', 'GeneratorSequence', 'HalfOpenInterval', 'HeapBuffer', 'HeapBufferStorage', 'ImplicitlyUnwrappedOptional', 'IndexingGenerator', 'Int', 'Int16', 'Int32', 'Int64', 'Int8', 'LazyBidirectionalCollection', 'LazyForwardCollection', 'LazyRandomAccessCollection', 'LazySequence', 'MapCollectionView', 'MapSequenceGenerator', 'MapSequenceView', 'MirrorDisposition', 'ObjectIdentifier', 'OnHeap', 'Optional', 'PermutationGenerator', 'QuickLookObject', 'RandomAccessReverseView', 'Range', 'RangeGenerator', 'RawByte', 'Repeat', 'ReverseBidirectionalIndex', 'ReverseRandomAccessIndex', 'SequenceOf', 'SinkOf', 'Slice', 'StaticString', 'StrideThrough', 'StrideThroughGenerator', 'StrideTo', 'StrideToGenerator', 'String', 'UInt', 'UInt16', 'UInt32', 'UInt64', 'UInt8', 'UTF16', 'UTF32', 'UTF8', 'UnicodeDecodingResult', 'UnicodeScalar', 'Unmanaged', 'UnsafeBufferPointer', 'UnsafeBufferPointerGenerator', 'UnsafeMutableBufferPointer', 'UnsafeMutablePointer', 'UnsafePointer', 
'Zip2', 'ZipGenerator2', # Protocols 'AbsoluteValuable', 'AnyObject', 'ArrayLiteralConvertible', 'BidirectionalIndexType', 'BitwiseOperationsType', 'BooleanLiteralConvertible', 'BooleanType', 'CVarArgType', 'CollectionType', 'Comparable', 'DebugPrintable', 'DictionaryLiteralConvertible', 'Equatable', 'ExtendedGraphemeClusterLiteralConvertible', 'ExtensibleCollectionType', 'FloatLiteralConvertible', 'FloatingPointType', 'ForwardIndexType', 'GeneratorType', 'Hashable', 'IntegerArithmeticType', 'IntegerLiteralConvertible', 'IntegerType', 'IntervalType', 'MirrorType', 'MutableCollectionType', 'MutableSliceable', 'NilLiteralConvertible', 'OutputStreamType', 'Printable', 'RandomAccessIndexType', 'RangeReplaceableCollectionType', 'RawOptionSetType', 'RawRepresentable', 'Reflectable', 'SequenceType', 'SignedIntegerType', 'SignedNumberType', 'SinkType', 'Sliceable', 'Streamable', 'Strideable', 'StringInterpolationConvertible', 'StringLiteralConvertible', 'UnicodeCodecType', 'UnicodeScalarLiteralConvertible', 'UnsignedIntegerType', '_ArrayBufferType', '_BidirectionalIndexType', '_CocoaStringType', '_CollectionType', '_Comparable', '_ExtensibleCollectionType', '_ForwardIndexType', '_Incrementable', '_IntegerArithmeticType', '_IntegerType', '_ObjectiveCBridgeable', '_RandomAccessIndexType', '_RawOptionSetType', '_SequenceType', '_Sequence_Type', '_SignedIntegerType', '_SignedNumberType', '_Sliceable', '_Strideable', '_SwiftNSArrayRequiredOverridesType', '_SwiftNSArrayType', '_SwiftNSCopyingType', '_SwiftNSDictionaryRequiredOverridesType', '_SwiftNSDictionaryType', '_SwiftNSEnumeratorType', '_SwiftNSFastEnumerationType', '_SwiftNSStringRequiredOverridesType', '_SwiftNSStringType', '_UnsignedIntegerType', # Variables 'C_ARGC', 'C_ARGV', 'Process', # Typealiases 'Any', 'AnyClass', 'BooleanLiteralType', 'CBool', 'CChar', 'CChar16', 'CChar32', 'CDouble', 'CFloat', 'CInt', 'CLong', 'CLongLong', 'CShort', 'CSignedChar', 'CUnsignedInt', 'CUnsignedLong', 'CUnsignedShort', 'CWideChar', 
'ExtendedGraphemeClusterType', 'Float32', 'Float64', 'FloatLiteralType', 'IntMax', 'IntegerLiteralType', 'StringLiteralType', 'UIntMax', 'UWord', 'UnicodeScalarType', 'Void', 'Word', # Foundation/Cocoa 'NSErrorPointer', 'NSObjectProtocol', 'Selector'), suffix=r'\b'), Name.Builtin), # Functions (words(( 'abs', 'advance', 'alignof', 'alignofValue', 'assert', 'assertionFailure', 'contains', 'count', 'countElements', 'debugPrint', 'debugPrintln', 'distance', 'dropFirst', 'dropLast', 'dump', 'enumerate', 'equal', 'extend', 'fatalError', 'filter', 'find', 'first', 'getVaList', 'indices', 'insert', 'isEmpty', 'join', 'last', 'lazy', 'lexicographicalCompare', 'map', 'max', 'maxElement', 'min', 'minElement', 'numericCast', 'overlaps', 'partition', 'precondition', 'preconditionFailure', 'prefix', 'print', 'println', 'reduce', 'reflect', 'removeAll', 'removeAtIndex', 'removeLast', 'removeRange', 'reverse', 'sizeof', 'sizeofValue', 'sort', 'sorted', 'splice', 'split', 'startsWith', 'stride', 'strideof', 'strideofValue', 'suffix', 'swap', 'toDebugString', 'toString', 'transcode', 'underestimateCount', 'unsafeAddressOf', 'unsafeBitCast', 'unsafeDowncast', 'withExtendedLifetime', 'withUnsafeMutablePointer', 'withUnsafeMutablePointers', 'withUnsafePointer', 'withUnsafePointers', 'withVaList'), suffix=r'\b'), Name.Builtin.Pseudo), # Implicit Block Variables (r'\$\d+', Name.Variable), # Binary Literal (r'0b[01_]+', Number.Bin), # Octal Literal (r'0o[0-7_]+', Number.Oct), # Hexadecimal Literal (r'0x[0-9a-fA-F_]+', Number.Hex), # Decimal Literal (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|' r'\.[0-9_]*|[eE][+\-]?[0-9_]+)', Number.Float), (r'[0-9][0-9_]*', Number.Integer), # String Literal (r'"', String, 'string'), # Operators and Punctuation (r'[(){}\[\].,:;=@#`?]|->|[<&?](?=\w)|(?<=\w)[>!?]', Punctuation), (r'[/=\-+!*%<>&|^?~]+', Operator), # Identifier (r'[a-zA-Z_]\w*', Name) ], 'keywords': [ (words(( 'as', 'break', 'case', 'catch', 'continue', 'default', 'defer', 'do', 'else', 
'fallthrough', 'for', 'guard', 'if', 'in', 'is', 'repeat', 'return', '#selector', 'switch', 'throw', 'try', 'where', 'while'), suffix=r'\b'), Keyword), (r'@availability\([^)]+\)', Keyword.Reserved), (words(( 'associativity', 'convenience', 'dynamic', 'didSet', 'final', 'get', 'indirect', 'infix', 'inout', 'lazy', 'left', 'mutating', 'none', 'nonmutating', 'optional', 'override', 'postfix', 'precedence', 'prefix', 'Protocol', 'required', 'rethrows', 'right', 'set', 'throws', 'Type', 'unowned', 'weak', 'willSet', '@availability', '@autoclosure', '@noreturn', '@NSApplicationMain', '@NSCopying', '@NSManaged', '@objc', '@UIApplicationMain', '@IBAction', '@IBDesignable', '@IBInspectable', '@IBOutlet'), suffix=r'\b'), Keyword.Reserved), (r'(as|dynamicType|false|is|nil|self|Self|super|true|__COLUMN__' r'|__FILE__|__FUNCTION__|__LINE__|_' r'|#(?:file|line|column|function))\b', Keyword.Constant), (r'import\b', Keyword.Declaration, 'module'), (r'(class|enum|extension|struct|protocol)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword.Declaration, Text, Name.Class)), (r'(func)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword.Declaration, Text, Name.Function)), (r'(var|let)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword.Declaration, Text, Name.Variable)), (words(( 'class', 'deinit', 'enum', 'extension', 'func', 'import', 'init', 'internal', 'let', 'operator', 'private', 'protocol', 'public', 'static', 'struct', 'subscript', 'typealias', 'var'), suffix=r'\b'), Keyword.Declaration) ], 'comment': [ (r':param: [a-zA-Z_]\w*|:returns?:|(FIXME|MARK|TODO):', Comment.Special) ], # Nested 'comment-single': [ (r'\n', Text, '#pop'), include('comment'), (r'[^\n]', Comment.Single) ], 'comment-multi': [ include('comment'), (r'[^*/]', Comment.Multiline), (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'[*/]', Comment.Multiline) ], 'module': [ (r'\n', Text, '#pop'), (r'[a-zA-Z_]\w*', Name.Class), include('root') ], 'preproc': [ (r'\n', Text, '#pop'), include('keywords'), (r'[A-Za-z]\w*', 
Comment.Preproc), include('root') ], 'string': [ (r'\\\(', String.Interpol, 'string-intp'), (r'"', String, '#pop'), (r"""\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}""", String.Escape), (r'[^\\"]+', String), (r'\\', String) ], 'string-intp': [ (r'\(', String.Interpol, '#push'), (r'\)', String.Interpol, '#pop'), include('root') ] } def get_tokens_unprocessed(self, text): from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \ COCOA_PROTOCOLS, COCOA_PRIMITIVES for index, token, value in \ RegexLexer.get_tokens_unprocessed(self, text): if token is Name or token is Name.Class: if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \ or value in COCOA_PRIMITIVES: token = Name.Builtin.Pseudo yield index, token, value
SwiftLexer
python
getsentry__sentry
src/sentry/monitors/processing_errors/errors.py
{ "start": 2888, "end": 3212 }
class ____(TypedDict): """ A monitor wasn't found, and we failed to upsert due to invalid config """ type: Literal[ProcessingErrorType.MONITOR_INVALID_CONFIG] errors: Mapping[str, Sequence[str]] """ Mapping of monitor config field name to the problems with that field """
MonitorInvalidConfig
python
tiangolo__fastapi
tests/test_pydantic_v1_v2_list.py
{ "start": 375, "end": 26010 }
class ____(BaseModel): title: str size: int description: Union[str, None] = None sub: SubItem multi: List[SubItem] = [] app = FastAPI() @app.post("/item") def handle_item(data: Item) -> List[Item]: return [data, data] @app.post("/item-filter", response_model=List[Item]) def handle_item_filter(data: Item) -> Any: extended_data = data.dict() extended_data.update({"secret_data": "classified", "internal_id": 12345}) extended_data["sub"].update({"internal_id": 67890}) return [extended_data, extended_data] @app.post("/item-list") def handle_item_list(data: List[Item]) -> Item: if data: return data[0] return Item(title="", size=0, sub=SubItem(name="")) @app.post("/item-list-filter", response_model=Item) def handle_item_list_filter(data: List[Item]) -> Any: if data: extended_data = data[0].dict() extended_data.update({"secret_data": "classified", "internal_id": 12345}) extended_data["sub"].update({"internal_id": 67890}) return extended_data return Item(title="", size=0, sub=SubItem(name="")) @app.post("/item-list-to-list") def handle_item_list_to_list(data: List[Item]) -> List[Item]: return data @app.post("/item-list-to-list-filter", response_model=List[Item]) def handle_item_list_to_list_filter(data: List[Item]) -> Any: if data: extended_data = data[0].dict() extended_data.update({"secret_data": "classified", "internal_id": 12345}) extended_data["sub"].update({"internal_id": 67890}) return [extended_data, extended_data] return [] client = TestClient(app) def test_item_to_list(): response = client.post( "/item", json={ "title": "Test Item", "size": 100, "description": "This is a test item", "sub": {"name": "SubItem1"}, "multi": [{"name": "Multi1"}, {"name": "Multi2"}], }, ) assert response.status_code == 200, response.text result = response.json() assert isinstance(result, list) assert len(result) == 2 for item in result: assert item == { "title": "Test Item", "size": 100, "description": "This is a test item", "sub": {"name": "SubItem1"}, "multi": [{"name": "Multi1"}, 
{"name": "Multi2"}], } def test_item_to_list_filter(): response = client.post( "/item-filter", json={ "title": "Filtered Item", "size": 200, "description": "Test filtering", "sub": {"name": "SubFiltered"}, "multi": [], }, ) assert response.status_code == 200, response.text result = response.json() assert isinstance(result, list) assert len(result) == 2 for item in result: assert item == { "title": "Filtered Item", "size": 200, "description": "Test filtering", "sub": {"name": "SubFiltered"}, "multi": [], } # Verify secret fields are filtered out assert "secret_data" not in item assert "internal_id" not in item assert "internal_id" not in item["sub"] def test_list_to_item(): response = client.post( "/item-list", json=[ {"title": "First Item", "size": 50, "sub": {"name": "First Sub"}}, {"title": "Second Item", "size": 75, "sub": {"name": "Second Sub"}}, ], ) assert response.status_code == 200, response.text assert response.json() == { "title": "First Item", "size": 50, "description": None, "sub": {"name": "First Sub"}, "multi": [], } def test_list_to_item_empty(): response = client.post( "/item-list", json=[], ) assert response.status_code == 200, response.text assert response.json() == { "title": "", "size": 0, "description": None, "sub": {"name": ""}, "multi": [], } def test_list_to_item_filter(): response = client.post( "/item-list-filter", json=[ { "title": "First Item", "size": 100, "sub": {"name": "First Sub"}, "multi": [{"name": "Multi1"}], }, {"title": "Second Item", "size": 200, "sub": {"name": "Second Sub"}}, ], ) assert response.status_code == 200, response.text result = response.json() assert result == { "title": "First Item", "size": 100, "description": None, "sub": {"name": "First Sub"}, "multi": [{"name": "Multi1"}], } # Verify secret fields are filtered out assert "secret_data" not in result assert "internal_id" not in result def test_list_to_item_filter_no_data(): response = client.post("/item-list-filter", json=[]) assert response.status_code == 200, 
response.text assert response.json() == { "title": "", "size": 0, "description": None, "sub": {"name": ""}, "multi": [], } def test_list_to_list(): input_items = [ {"title": "Item 1", "size": 10, "sub": {"name": "Sub1"}}, { "title": "Item 2", "size": 20, "description": "Second item", "sub": {"name": "Sub2"}, "multi": [{"name": "M1"}, {"name": "M2"}], }, {"title": "Item 3", "size": 30, "sub": {"name": "Sub3"}}, ] response = client.post( "/item-list-to-list", json=input_items, ) assert response.status_code == 200, response.text result = response.json() assert isinstance(result, list) assert len(result) == 3 assert result[0] == { "title": "Item 1", "size": 10, "description": None, "sub": {"name": "Sub1"}, "multi": [], } assert result[1] == { "title": "Item 2", "size": 20, "description": "Second item", "sub": {"name": "Sub2"}, "multi": [{"name": "M1"}, {"name": "M2"}], } assert result[2] == { "title": "Item 3", "size": 30, "description": None, "sub": {"name": "Sub3"}, "multi": [], } def test_list_to_list_filter(): response = client.post( "/item-list-to-list-filter", json=[{"title": "Item 1", "size": 100, "sub": {"name": "Sub1"}}], ) assert response.status_code == 200, response.text result = response.json() assert isinstance(result, list) assert len(result) == 2 for item in result: assert item == { "title": "Item 1", "size": 100, "description": None, "sub": {"name": "Sub1"}, "multi": [], } # Verify secret fields are filtered out assert "secret_data" not in item assert "internal_id" not in item def test_list_to_list_filter_no_data(): response = client.post( "/item-list-to-list-filter", json=[], ) assert response.status_code == 200, response.text assert response.json() == [] def test_list_validation_error(): response = client.post( "/item-list", json=[ {"title": "Valid Item", "size": 100, "sub": {"name": "Sub1"}}, { "title": "Invalid Item" # Missing required fields: size and sub }, ], ) assert response.status_code == 422, response.text error_detail = 
response.json()["detail"] assert len(error_detail) == 2 assert { "loc": ["body", 1, "size"], "msg": "field required", "type": "value_error.missing", } in error_detail assert { "loc": ["body", 1, "sub"], "msg": "field required", "type": "value_error.missing", } in error_detail def test_list_nested_validation_error(): response = client.post( "/item-list", json=[ {"title": "Item with bad sub", "size": 100, "sub": {"wrong_field": "value"}} ], ) assert response.status_code == 422, response.text assert response.json() == snapshot( { "detail": [ { "loc": ["body", 0, "sub", "name"], "msg": "field required", "type": "value_error.missing", } ] } ) def test_list_type_validation_error(): response = client.post( "/item-list", json=[{"title": "Item", "size": "not_a_number", "sub": {"name": "Sub"}}], ) assert response.status_code == 422, response.text assert response.json() == snapshot( { "detail": [ { "loc": ["body", 0, "size"], "msg": "value is not a valid integer", "type": "type_error.integer", } ] } ) def test_invalid_list_structure(): response = client.post( "/item-list", json={"title": "Not a list", "size": 100, "sub": {"name": "Sub"}}, ) assert response.status_code == 422, response.text assert response.json() == snapshot( { "detail": [ { "loc": ["body"], "msg": "value is not a valid list", "type": "type_error.list", } ] } ) def test_openapi_schema(): response = client.get("/openapi.json") assert response.status_code == 200, response.text assert response.json() == snapshot( { "openapi": "3.1.0", "info": {"title": "FastAPI", "version": "0.1.0"}, "paths": { "/item": { "post": { "summary": "Handle Item", "operationId": "handle_item_item_post", "requestBody": { "content": { "application/json": { "schema": pydantic_snapshot( v2=snapshot( { "allOf": [ { "$ref": "#/components/schemas/Item" } ], "title": "Data", } ), v1=snapshot( {"$ref": "#/components/schemas/Item"} ), ) } }, "required": True, }, "responses": { "200": { "description": "Successful Response", "content": { 
"application/json": { "schema": { "items": { "$ref": "#/components/schemas/Item" }, "type": "array", "title": "Response Handle Item Item Post", } } }, }, "422": { "description": "Validation Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/HTTPValidationError" } } }, }, }, } }, "/item-filter": { "post": { "summary": "Handle Item Filter", "operationId": "handle_item_filter_item_filter_post", "requestBody": { "content": { "application/json": { "schema": pydantic_snapshot( v2=snapshot( { "allOf": [ { "$ref": "#/components/schemas/Item" } ], "title": "Data", } ), v1=snapshot( {"$ref": "#/components/schemas/Item"} ), ) } }, "required": True, }, "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { "items": { "$ref": "#/components/schemas/Item" }, "type": "array", "title": "Response Handle Item Filter Item Filter Post", } } }, }, "422": { "description": "Validation Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/HTTPValidationError" } } }, }, }, } }, "/item-list": { "post": { "summary": "Handle Item List", "operationId": "handle_item_list_item_list_post", "requestBody": { "content": { "application/json": { "schema": { "items": {"$ref": "#/components/schemas/Item"}, "type": "array", "title": "Data", } } }, "required": True, }, "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/Item"} } }, }, "422": { "description": "Validation Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/HTTPValidationError" } } }, }, }, } }, "/item-list-filter": { "post": { "summary": "Handle Item List Filter", "operationId": "handle_item_list_filter_item_list_filter_post", "requestBody": { "content": { "application/json": { "schema": { "items": {"$ref": "#/components/schemas/Item"}, "type": "array", "title": "Data", } } }, "required": True, }, 
"responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/Item"} } }, }, "422": { "description": "Validation Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/HTTPValidationError" } } }, }, }, } }, "/item-list-to-list": { "post": { "summary": "Handle Item List To List", "operationId": "handle_item_list_to_list_item_list_to_list_post", "requestBody": { "content": { "application/json": { "schema": { "items": {"$ref": "#/components/schemas/Item"}, "type": "array", "title": "Data", } } }, "required": True, }, "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { "items": { "$ref": "#/components/schemas/Item" }, "type": "array", "title": "Response Handle Item List To List Item List To List Post", } } }, }, "422": { "description": "Validation Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/HTTPValidationError" } } }, }, }, } }, "/item-list-to-list-filter": { "post": { "summary": "Handle Item List To List Filter", "operationId": "handle_item_list_to_list_filter_item_list_to_list_filter_post", "requestBody": { "content": { "application/json": { "schema": { "items": {"$ref": "#/components/schemas/Item"}, "type": "array", "title": "Data", } } }, "required": True, }, "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { "items": { "$ref": "#/components/schemas/Item" }, "type": "array", "title": "Response Handle Item List To List Filter Item List To List Filter Post", } } }, }, "422": { "description": "Validation Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/HTTPValidationError" } } }, }, }, } }, }, "components": { "schemas": { "HTTPValidationError": { "properties": { "detail": { "items": { "$ref": "#/components/schemas/ValidationError" }, "type": "array", "title": "Detail", } }, 
"type": "object", "title": "HTTPValidationError", }, "Item": { "properties": { "title": {"type": "string", "title": "Title"}, "size": {"type": "integer", "title": "Size"}, "description": {"type": "string", "title": "Description"}, "sub": {"$ref": "#/components/schemas/SubItem"}, "multi": { "items": {"$ref": "#/components/schemas/SubItem"}, "type": "array", "title": "Multi", "default": [], }, }, "type": "object", "required": ["title", "size", "sub"], "title": "Item", }, "SubItem": { "properties": {"name": {"type": "string", "title": "Name"}}, "type": "object", "required": ["name"], "title": "SubItem", }, "ValidationError": { "properties": { "loc": { "items": { "anyOf": [{"type": "string"}, {"type": "integer"}] }, "type": "array", "title": "Location", }, "msg": {"type": "string", "title": "Message"}, "type": {"type": "string", "title": "Error Type"}, }, "type": "object", "required": ["loc", "msg", "type"], "title": "ValidationError", }, } }, } )
Item
python
getsentry__sentry
tests/sentry/core/endpoints/test_organization_details.py
{ "start": 70029, "end": 80732 }
class ____(TwoFactorAPITestCase): endpoint = "sentry-api-0-organization-details" def setUp(self) -> None: # 2FA enforced org self.org_2fa = self.create_organization(owner=self.create_user()) self.enable_org_2fa(self.org_2fa) self.no_2fa_user = self.create_user() self.create_member(organization=self.org_2fa, user=self.no_2fa_user, role="member") # 2FA not enforced org self.owner = self.create_user() self.organization = self.create_organization(owner=self.owner) self.manager = self.create_user() self.create_member(organization=self.organization, user=self.manager, role="manager") self.org_user = self.create_user() self.create_member(organization=self.organization, user=self.org_user, role="member") # 2FA enrolled user self.has_2fa = self.create_user() with assume_test_silo_mode_of(Authenticator): TotpInterface().enroll(self.has_2fa) self.create_member(organization=self.organization, user=self.has_2fa, role="manager") with assume_test_silo_mode_of(Authenticator): assert self.has_2fa.has_2fa() def assert_2fa_email_equal(self, outbox, expected): invite_url_regex = re.compile(r"http://.*/accept/[0-9]+/[a-f0-9]+/") assert len(outbox) == len(expected) assert sorted(email.to[0] for email in outbox) == sorted(expected) for email in outbox: assert invite_url_regex.search( email.body ), f"No invite URL found in 2FA invite email body to: {email.to}" def assert_has_correct_audit_log( self, acting_user: User, target_user: User, organization: Organization ): with outbox_runner(): pass with assume_test_silo_mode_of(AuditLogEntry): audit_log_entry_query = AuditLogEntry.objects.filter( actor_id=acting_user.id, organization_id=organization.id, event=audit_log.get_event_id("MEMBER_PENDING"), target_user_id=target_user.id, ) assert ( audit_log_entry_query.exists() ), f"No matching audit log entry found for actor: {acting_user}, target_user: {target_user}" assert ( len(audit_log_entry_query) == 1 ), f"More than 1 matching audit log entry found for actor: {acting_user}, target_user: 
{target_user}" audit_log_entry = audit_log_entry_query[0] assert audit_log_entry.target_object == organization.id assert audit_log_entry.data assert audit_log_entry.ip_address == "127.0.0.1" def test_cannot_enforce_2fa_without_2fa_enabled(self) -> None: with assume_test_silo_mode_of(Authenticator): assert not self.owner.has_2fa() self.assert_cannot_enable_org_2fa(self.organization, self.owner, 400, ERR_NO_2FA) # having recovery codes only (backup method) should not allow to enforce org 2FA with assume_test_silo_mode_of(Authenticator): RecoveryCodeInterface().enroll(self.owner) assert not self.owner.has_2fa() self.assert_cannot_enable_org_2fa(self.organization, self.owner, 400, ERR_NO_2FA) def test_cannot_enforce_2fa_with_sso_enabled(self) -> None: with assume_test_silo_mode_of(AuthProvider): auth_provider = AuthProvider.objects.create( provider="github", organization_id=self.organization.id ) # bypass SSO login auth_provider.flags.allow_unlinked = True with assume_test_silo_mode_of(AuthProvider): auth_provider.save() self.assert_cannot_enable_org_2fa(self.organization, self.has_2fa, 400, ERR_SSO_ENABLED) def test_cannot_enforce_2fa_with_saml_enabled(self) -> None: with assume_test_silo_mode_of(AuthProvider): auth_provider = AuthProvider.objects.create( provider="saml2", organization_id=self.organization.id ) # bypass SSO login auth_provider.flags.allow_unlinked = True with assume_test_silo_mode_of(AuthProvider): auth_provider.save() self.assert_cannot_enable_org_2fa(self.organization, self.has_2fa, 400, ERR_SSO_ENABLED) def test_owner_can_set_2fa_single_member(self) -> None: org = self.create_organization(owner=self.owner) with assume_test_silo_mode_of(Authenticator): TotpInterface().enroll(self.owner) with self.options({"system.url-prefix": "http://example.com"}), self.tasks(): self.assert_can_enable_org_2fa(org, self.owner) assert len(mail.outbox) == 0 def test_manager_can_set_2fa(self) -> None: org = self.create_organization(owner=self.owner) 
self.create_member(organization=org, user=self.manager, role="manager") self.assert_cannot_enable_org_2fa(org, self.manager, 400) with assume_test_silo_mode_of(Authenticator): TotpInterface().enroll(self.manager) with self.options({"system.url-prefix": "http://example.com"}), self.tasks(): self.assert_can_enable_org_2fa(org, self.manager) self.assert_2fa_email_equal(mail.outbox, [self.owner.email]) self.assert_has_correct_audit_log( acting_user=self.manager, target_user=self.owner, organization=org ) def test_members_cannot_set_2fa(self) -> None: self.assert_cannot_enable_org_2fa(self.organization, self.org_user, 403) with assume_test_silo_mode_of(Authenticator): TotpInterface().enroll(self.org_user) self.assert_cannot_enable_org_2fa(self.organization, self.org_user, 403) def test_owner_can_set_org_2fa(self) -> None: org = self.create_organization(owner=self.owner) with assume_test_silo_mode_of(Authenticator): TotpInterface().enroll(self.owner) user_emails_without_2fa = self.add_2fa_users_to_org(org) with self.options({"system.url-prefix": "http://example.com"}), self.tasks(): self.assert_can_enable_org_2fa(org, self.owner) self.assert_2fa_email_equal(mail.outbox, user_emails_without_2fa) for user_email in user_emails_without_2fa: with assume_test_silo_mode_of(User): user = User.objects.get(username=user_email) self.assert_has_correct_audit_log( acting_user=self.owner, target_user=user, organization=org ) mail.outbox = [] with self.options({"system.url-prefix": "http://example.com"}), self.tasks(): response = self.api_disable_org_2fa(org, self.owner) assert response.status_code == 200 assert not Organization.objects.get(id=org.id).flags.require_2fa assert len(mail.outbox) == 0 def test_preexisting_members_must_enable_2fa(self) -> None: self.login_as(self.no_2fa_user) self.get_error_response(self.org_2fa.slug, status_code=401) with assume_test_silo_mode_of(Authenticator): TotpInterface().enroll(self.no_2fa_user) self.get_success_response(self.org_2fa.slug) def 
test_new_member_must_enable_2fa(self) -> None: new_user = self.create_user() self.create_member(organization=self.org_2fa, user=new_user, role="member") self.login_as(new_user) self.get_error_response(self.org_2fa.slug, status_code=401) with assume_test_silo_mode_of(Authenticator): TotpInterface().enroll(new_user) self.get_success_response(self.org_2fa.slug) def test_member_disable_all_2fa_blocked(self) -> None: with assume_test_silo_mode_of(Authenticator): TotpInterface().enroll(self.no_2fa_user) self.login_as(self.no_2fa_user) self.get_success_response(self.org_2fa.slug) with assume_test_silo_mode_of(Authenticator): Authenticator.objects.get(user=self.no_2fa_user).delete() self.get_error_response(self.org_2fa.slug, status_code=401) def test_superuser_can_access_org_details(self) -> None: superuser = self.create_user(is_superuser=True) self.login_as(superuser, superuser=True) self.get_success_response(self.org_2fa.slug) def test_trusted_relays_option_serialization() -> None: # incoming raw data data = { "publicKey": _VALID_RELAY_KEYS[0], "name": "Relay1", "description": "the description", "lastModified": "2020-05-20T20:21:22", "created": "2020-01-17T11:12:13", } serializer = TrustedRelaySerializer(data=data) assert serializer.is_valid() expected_incoming = { "public_key": _VALID_RELAY_KEYS[0], "name": "Relay1", "description": "the description", } # check incoming deserialization (data will be further completed with date info the by server) assert serializer.validated_data == expected_incoming invalid_payloads = [ { "publicKey": _VALID_RELAY_KEYS[0], # no name "description": "the description", }, { "publicKey": _VALID_RELAY_KEYS[0], "name": " ", # empty name "description": "the description", }, { "publicKey": _VALID_RELAY_KEYS[0], "name": None, # null name "description": "the description", }, {"publicKey": "Bad Key", "name": "name", "description": "the description"}, # invalid key { # missing key "name": "name", "description": "the description", }, {"publicKey": 
None, "name": "name", "description": "the description"}, # null key "Bad input", # not an object ] @pytest.mark.parametrize("invalid_data", invalid_payloads) def test_trusted_relay_serializer_validation(invalid_data) -> None: """ Tests that the public key is validated """ # incoming raw data serializer = TrustedRelaySerializer(data=invalid_data) assert not serializer.is_valid() def test_trusted_relays_option_deserialization() -> None: # internal data instance = { "public_key": "key1", "name": "Relay1", "description": "the description", "last_modified": "2020-05-20T20:21:22Z", "created": "2020-01-17T11:12:13Z", } serializer = TrustedRelaySerializer(instance) expected_outgoing = { "publicKey": "key1", "name": "Relay1", "description": "the description", "lastModified": "2020-05-20T20:21:22Z", "created": "2020-01-17T11:12:13Z", } # check outgoing deserialization (all info in camelCase) assert serializer.data == expected_outgoing
OrganizationSettings2FATest
python
has2k1__plotnine
plotnine/scales/scale.py
{ "start": 699, "end": 8078 }
class ____( ABC, Generic[RangeT, BreaksUserT, LimitsUserT, GuideTypeT], metaclass=Register, ): """ Base class for all scales """ name: str | None = None """ The name of the scale. It is used as the label of the axis or the title of the guide. Suitable defaults are chosen depending on the type of scale. """ # # major breaks breaks: BreaksUserT """ List of major break points. Or a callable that takes a tuple of limits and returns a list of breaks. If `True`, automatically calculate the breaks. """ limits: LimitsUserT """ Limits of the scale. Most commonly, these are the min & max values for the scales. For scales that deal with categoricals, these may be a subset or superset of the categories. """ # labels at the breaks labels: ScaleLabelsUser = True """ Labels at the `breaks`. Alternatively, a callable that takes an array_like of break points as input and returns a list of strings. """ # multiplicative and additive expansion constants # fmt: off expand: ( tuple[float, float] | tuple[float, float, float, float] | None ) = None # fmt: on """ Multiplicative and additive expansion constants that determine how the scale is expanded. If specified must be of length 2 or 4. Specifically the values are in this order: ``` (mul, add) (mul_low, add_low, mul_high, add_high) ``` For example, - `(0, 0)` - Do not expand. - `(0, 1)` - Expand lower and upper limits by 1 unit. - `(1, 0)` - Expand lower and upper limits by 100%. - `(0, 0, 0, 0)` - Do not expand, as `(0, 0)`. - `(0, 0, 0, 1)` - Expand upper limit by 1 unit. - `(0, 1, 0.1, 0)` - Expand lower limit by 1 unit and upper limit by 10%. - `(0, 0, 0.1, 2)` - Expand upper limit by 10% plus 2 units. If not specified, suitable defaults are chosen. """ # legend or any other guide guide: GuideTypeT """ Whether to include a legend """ # What to do with the NA values na_value: Any = np.nan """ What value to assign to missing values. Default is to assign `np.nan`. 
""" aesthetics: Sequence[ScaledAestheticsName] = () """ Aesthetics affected by this scale. These are defined by each scale and the user should probably not change them. Have fun. """ _range: RangeT = field(init=False, repr=False) # Defined aesthetics for the scale _aesthetics: Sequence[ScaledAestheticsName] = field(init=False, repr=False) def __post_init__(self): breaks = getattr(self, "breaks") if ( np.iterable(breaks) and np.iterable(self.labels) and len(self.breaks) != len(self.labels) # type: ignore ): raise PlotnineError("Breaks and labels have unequal lengths") if ( breaks is None and not is_position_aes(self.aesthetics) and self.guide is not None ): self.guide = None # pyright: ignore self.aesthetics = rename_aesthetics( self.aesthetics if self.aesthetics else self._aesthetics ) def __radd__(self, other): """ Add this scale to ggplot object """ other.scales.append(copy(self)) return other def map(self, x, limits=None): """ Map every element of x The palette should do the real work, this should make sure that sensible values are sent and return from the palette. """ raise NotImplementedError def train(self, x: pd.Series | NDArray): """ Train scale Parameters ---------- x : A column of data to train over """ raise NotImplementedError def dimension(self, expand=None, limits=None): """ Get the phyical size of the scale. 
""" raise NotImplementedError def expand_limits( self, limits, # : ScaleLimits expand, # : tuple[float, float] | tuple[float, float, float, float] coord_limits, # : CoordRange | None trans, # : Trans | Type[Trans] ) -> range_view: """ Expand the limits of the scale """ raise NotImplementedError def transform_df(self, df: pd.DataFrame) -> pd.DataFrame: """ Transform dataframe """ raise NotImplementedError def transform(self, x): """ Transform array|series x """ raise NotImplementedError def inverse_df(self, df: pd.DataFrame) -> pd.DataFrame: """ Inverse transform dataframe """ raise NotImplementedError def inverse(self, x): """ Inverse transform array|series x """ raise NotImplementedError def view( self, limits=None, # : Optional[ScaleLimits] range=None, # : Optional[CoordRange] = None ) -> scale_view: """ Information about the trained scale """ raise NotImplementedError def default_expansion( self, mult: float | tuple[float, float] = 0, add: Any | tuple[Any, Any] = 0, expand=True, ) -> tuple[float, float, float, float]: """ Get default expansion for this scale """ if not expand: return (0, 0, 0, 0) if not (exp := self.expand): m1, m2 = mult if isinstance(mult, (tuple, list)) else (mult, mult) a1, a2 = cast( "tuple[float, float]", (add if isinstance(add, (tuple, list)) else (add, add)), ) exp = (m1, a1, m2, a2) elif len(exp) == 2: exp = (*exp, *exp) return exp def clone(self): return deepcopy(self) def reset(self): """ Set the range of the scale to None. 
i.e Forget all the training """ self._range.reset() def is_empty(self) -> bool: """ Whether the scale has size information """ if not hasattr(self, "_range"): return True return self._range.is_empty() and self.limits is None @property def final_limits(self) -> Any: raise NotImplementedError def train_df(self, df: pd.DataFrame): """ Train scale from a dataframe """ aesthetics = sorted(set(self.aesthetics) & set(df.columns)) for ae in aesthetics: self.train(df[ae]) def map_df(self, df: pd.DataFrame) -> pd.DataFrame: """ Map df """ if len(df) == 0: return df aesthetics = set(self.aesthetics) & set(df.columns) for ae in aesthetics: df[ae] = self.map(df[ae]) return df def get_labels(self, breaks=None) -> Sequence[str]: """ Get labels, calculating them if required """ raise NotImplementedError def get_breaks(self, limits=None): """ Get Breaks """ raise NotImplementedError def get_bounded_breaks(self, limits=None): """ Return Breaks that are within the limits """ raise NotImplementedError
scale
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/triggers/bedrock.py
{ "start": 7332, "end": 8900 }
class ____(AwsBaseWaiterTrigger): """ Trigger when a batch inference job is complete. :param job_arn: The Amazon Resource Name (ARN) of the batch inference job. :param waiter_delay: The amount of time in seconds to wait between attempts. (default: 120) :param waiter_max_attempts: The maximum number of attempts to be made. (default: 75) :param aws_conn_id: The Airflow connection used for AWS credentials. """ def __init__( self, *, job_arn: str, waiter_name: str | ArgNotSet = NOTSET, # This must be defined in the child class. waiter_delay: int = 120, waiter_max_attempts: int = 75, aws_conn_id: str | None = None, ) -> None: if waiter_name == NOTSET: raise NotImplementedError("Triggers must provide a waiter name.") super().__init__( serialized_fields={"job_arn": job_arn}, waiter_name=str(waiter_name), # Cast a string to a string to make mypy happy waiter_args={"jobIdentifier": job_arn}, failure_message="Bedrock batch inference job failed.", status_message="Status of Bedrock batch inference job is", status_queries=["status"], return_key="job_arn", return_value=job_arn, waiter_delay=waiter_delay, waiter_max_attempts=waiter_max_attempts, aws_conn_id=aws_conn_id, ) def hook(self) -> AwsGenericHook: return BedrockHook(aws_conn_id=self.aws_conn_id)
BedrockBaseBatchInferenceTrigger
python
crytic__slither
slither/tools/upgradeability/checks/variables_order.py
{ "start": 4749, "end": 5960 }
class ____(DifferentVariableContractProxy): ARGUMENT = "order-vars-contracts" HELP = "Incorrect vars order with the v2" WIKI = "https://github.com/crytic/slither/wiki/Upgradeability-Checks#incorrect-variables-with-the-v2" WIKI_TITLE = "Incorrect variables with the v2" # region wiki_description WIKI_DESCRIPTION = """ Detect variables that are different between the original contract and the updated one. """ # endregion wiki_description # region wiki_exploit_scenario WIKI_EXPLOIT_SCENARIO = """ ```solidity contract Contract{ uint variable1; } contract ContractV2{ address variable1; } ``` `Contract` and `ContractV2` do not have the same storage layout. As a result the storage of both contracts can be corrupted. """ # endregion wiki_exploit_scenario # region wiki_recommendation WIKI_RECOMMENDATION = """ Respect the variable order of the original contract in the updated contract. """ # endregion wiki_recommendation REQUIRE_CONTRACT = True REQUIRE_PROXY = False REQUIRE_CONTRACT_V2 = True def _contract2(self) -> Contract: assert self.contract_v2 return self.contract_v2
DifferentVariableContractNewContract
python
mlflow__mlflow
dev/clint/src/clint/rules/os_chdir_in_test.py
{ "start": 84, "end": 520 }
class ____(Rule): def _message(self) -> str: return "Do not use `os.chdir` in test directly. Use `monkeypatch.chdir` (https://docs.pytest.org/en/stable/reference/reference.html#pytest.MonkeyPatch.chdir)." @staticmethod def check(node: ast.Call, resolver: Resolver) -> bool: """ Returns True if the call is to os.chdir(). """ return resolver.resolve(node) == ["os", "chdir"]
OsChdirInTest
python
kamyu104__LeetCode-Solutions
Python/shortest-way-to-form-string.py
{ "start": 123, "end": 871 }
class ____(object): def shortestWay(self, source, target): """ :type source: str :type target: str :rtype: int """ lookup = [[None for _ in xrange(26)] for _ in xrange(len(source)+1)] find_char_next_pos = [None]*26 for i in reversed(xrange(len(source))): find_char_next_pos[ord(source[i])-ord('a')] = i+1 lookup[i] = list(find_char_next_pos) result, start = 1, 0 for c in target: start = lookup[start][ord(c)-ord('a')] if start != None: continue result += 1 start = lookup[0][ord(c)-ord('a')] if start == None: return -1 return result
Solution
python
pandas-dev__pandas
pandas/tests/indexes/datetimes/test_setops.py
{ "start": 463, "end": 19877 }
class ____: tz = [ None, "UTC", "Asia/Tokyo", "US/Eastern", "dateutil/Asia/Singapore", "dateutil/US/Pacific", ] # TODO: moved from test_datetimelike; dedup with version below def test_union2(self, sort): everything = date_range("2020-01-01", periods=10) first = everything[:5] second = everything[5:] union = first.union(second, sort=sort) tm.assert_index_equal(union, everything) @pytest.mark.parametrize("box", [np.array, Series, list]) def test_union3(self, sort, box): everything = date_range("2020-01-01", periods=10) first = everything[:5] second = everything[5:] # GH 10149 support listlike inputs other than Index objects expected = first.union(second, sort=sort) case = box(second.values) result = first.union(case, sort=sort) tm.assert_index_equal(result, expected) @pytest.mark.parametrize("tz", tz) def test_union(self, tz, sort): rng1 = date_range("1/1/2000", freq="D", periods=5, tz=tz, unit="ns") other1 = date_range("1/6/2000", freq="D", periods=5, tz=tz, unit="ns") expected1 = date_range("1/1/2000", freq="D", periods=10, tz=tz, unit="ns") expected1_notsorted = DatetimeIndex(list(other1) + list(rng1)) rng2 = date_range("1/1/2000", freq="D", periods=5, tz=tz, unit="ns") other2 = date_range("1/4/2000", freq="D", periods=5, tz=tz, unit="ns") expected2 = date_range("1/1/2000", freq="D", periods=8, tz=tz, unit="ns") expected2_notsorted = DatetimeIndex(list(other2) + list(rng2[:3])) rng3 = date_range("1/1/2000", freq="D", periods=5, tz=tz, unit="ns") other3 = DatetimeIndex([], tz=tz).as_unit("ns") expected3 = date_range("1/1/2000", freq="D", periods=5, tz=tz, unit="ns") expected3_notsorted = rng3 for rng, other, exp, exp_notsorted in [ (rng1, other1, expected1, expected1_notsorted), (rng2, other2, expected2, expected2_notsorted), (rng3, other3, expected3, expected3_notsorted), ]: result_union = rng.union(other, sort=sort) tm.assert_index_equal(result_union, exp) result_union = other.union(rng, sort=sort) if sort is None: tm.assert_index_equal(result_union, exp) else: 
tm.assert_index_equal(result_union, exp_notsorted) def test_union_coverage(self, sort): idx = DatetimeIndex(["2000-01-03", "2000-01-01", "2000-01-02"]) ordered = DatetimeIndex(idx.sort_values(), freq="infer") result = ordered.union(idx, sort=sort) tm.assert_index_equal(result, ordered) result = ordered[:0].union(ordered, sort=sort) tm.assert_index_equal(result, ordered) assert result.freq == ordered.freq def test_union_bug_1730(self, sort): rng_a = date_range("1/1/2012", periods=4, freq="3h") rng_b = date_range("1/1/2012", periods=4, freq="4h") result = rng_a.union(rng_b, sort=sort) exp = list(rng_a) + list(rng_b[1:]) if sort is None: exp = DatetimeIndex(sorted(exp)) else: exp = DatetimeIndex(exp) tm.assert_index_equal(result, exp) def test_union_bug_1745(self, sort): left = DatetimeIndex(["2012-05-11 15:19:49.695000"]) right = DatetimeIndex( [ "2012-05-29 13:04:21.322000", "2012-05-11 15:27:24.873000", "2012-05-11 15:31:05.350000", ] ) result = left.union(right, sort=sort) exp = DatetimeIndex( [ "2012-05-11 15:19:49.695000", "2012-05-29 13:04:21.322000", "2012-05-11 15:27:24.873000", "2012-05-11 15:31:05.350000", ] ) if sort is None: exp = exp.sort_values() tm.assert_index_equal(result, exp) def test_union_bug_4564(self, sort): from pandas import DateOffset left = date_range("2013-01-01", "2013-02-01") right = left + DateOffset(minutes=15) result = left.union(right, sort=sort) exp = list(left) + list(right) if sort is None: exp = DatetimeIndex(sorted(exp)) else: exp = DatetimeIndex(exp) tm.assert_index_equal(result, exp) def test_union_freq_both_none(self, sort): # GH11086 expected = bdate_range("20150101", periods=10) expected._data.freq = None result = expected.union(expected, sort=sort) tm.assert_index_equal(result, expected) assert result.freq is None def test_union_freq_infer(self): # When taking the union of two DatetimeIndexes, we infer # a freq even if the arguments don't have freq. This matches # TimedeltaIndex behavior. 
dti = date_range("2016-01-01", periods=5) left = dti[[0, 1, 3, 4]] right = dti[[2, 3, 1]] assert left.freq is None assert right.freq is None result = left.union(right) tm.assert_index_equal(result, dti) assert result.freq == "D" def test_union_dataframe_index(self): rng1 = date_range("1/1/1999", "1/1/2012", freq="MS") s1 = Series(np.random.default_rng(2).standard_normal(len(rng1)), rng1) rng2 = date_range("1/1/1980", "12/1/2001", freq="MS") s2 = Series(np.random.default_rng(2).standard_normal(len(rng2)), rng2) df = DataFrame({"s1": s1, "s2": s2}) exp = date_range("1/1/1980", "1/1/2012", freq="MS") tm.assert_index_equal(df.index, exp) def test_union_with_DatetimeIndex(self, sort): i1 = Index(np.arange(0, 20, 2, dtype=np.int64)) i2 = date_range(start="2012-01-03 00:00:00", periods=10, freq="D") # Works i1.union(i2, sort=sort) # Fails with "AttributeError: can't set attribute" i2.union(i1, sort=sort) def test_union_same_timezone_different_units(self): # GH 55238 idx1 = date_range("2000-01-01", periods=3, tz="UTC").as_unit("ms") idx2 = date_range("2000-01-01", periods=3, tz="UTC").as_unit("us") result = idx1.union(idx2) expected = date_range("2000-01-01", periods=3, tz="UTC").as_unit("us") tm.assert_index_equal(result, expected) def test_union_same_nonzero_timezone_different_units(self): # GH 60080 - fix timezone being changed to UTC when units differ # but timezone is the same tz = "UTC+05:00" idx1 = date_range("2000-01-01", periods=3, tz=tz).as_unit("us") idx2 = date_range("2000-01-01", periods=3, tz=tz).as_unit("ns") # Check pre-conditions assert idx1.tz == idx2.tz assert idx1.dtype != idx2.dtype # Different units # Test union preserves timezone when units differ result = idx1.union(idx2) expected = date_range("2000-01-01", periods=3, tz=tz).as_unit("ns") tm.assert_index_equal(result, expected) def test_union_different_dates_same_timezone_different_units(self): # GH 60080 - fix timezone being changed to UTC when units differ # but timezone is the same tz = 
"UTC+05:00" idx1 = date_range("2000-01-01", periods=3, tz=tz).as_unit("us") idx3 = date_range("2000-01-03", periods=3, tz=tz).as_unit("us") # Test with different dates to ensure it's not just returning one of the inputs result = idx1.union(idx3) expected = DatetimeIndex( ["2000-01-01", "2000-01-02", "2000-01-03", "2000-01-04", "2000-01-05"], tz=tz, ).as_unit("us") tm.assert_index_equal(result, expected) def test_intersection_same_timezone_different_units(self): # GH 60080 - fix timezone being changed to UTC when units differ # but timezone is the same tz = "UTC+05:00" idx1 = date_range("2000-01-01", periods=3, tz=tz).as_unit("us") idx2 = date_range("2000-01-01", periods=3, tz=tz).as_unit("ns") # Check pre-conditions assert idx1.tz == idx2.tz assert idx1.dtype != idx2.dtype # Different units # Test intersection result = idx1.intersection(idx2) expected = date_range("2000-01-01", periods=3, tz=tz).as_unit("ns") tm.assert_index_equal(result, expected) def test_symmetric_difference_same_timezone_different_units(self): # GH 60080 - fix timezone being changed to UTC when units differ # but timezone is the same tz = "UTC+05:00" idx1 = date_range("2000-01-01", periods=3, tz=tz).as_unit("us") idx4 = date_range("2000-01-02", periods=3, tz=tz).as_unit("ns") # Check pre-conditions assert idx1.tz == idx4.tz assert idx1.dtype != idx4.dtype # Different units # Test symmetric_difference result = idx1.symmetric_difference(idx4) expected = DatetimeIndex(["2000-01-01", "2000-01-04"], tz=tz).as_unit("ns") tm.assert_index_equal(result, expected) # TODO: moved from test_datetimelike; de-duplicate with version below def test_intersection2(self): first = date_range("2020-01-01", periods=10) second = first[5:] intersect = first.intersection(second) tm.assert_index_equal(intersect, second) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: result = first.intersection(case) tm.assert_index_equal(result, second) third = Index(["a", "b", "c"]) 
result = first.intersection(third) expected = Index([], dtype=object) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "tz", [None, "Asia/Tokyo", "US/Eastern", "dateutil/US/Pacific"] ) def test_intersection(self, tz, sort): # GH 4690 (with tz) base = date_range("6/1/2000", "6/30/2000", freq="D", name="idx", unit="ns") # if target has the same name, it is preserved rng2 = date_range("5/15/2000", "6/20/2000", freq="D", name="idx", unit="ns") expected2 = date_range("6/1/2000", "6/20/2000", freq="D", name="idx", unit="ns") # if target name is different, it will be reset rng3 = date_range("5/15/2000", "6/20/2000", freq="D", name="other", unit="ns") expected3 = date_range("6/1/2000", "6/20/2000", freq="D", name=None, unit="ns") rng4 = date_range("7/1/2000", "7/31/2000", freq="D", name="idx", unit="ns") expected4 = DatetimeIndex([], freq="D", name="idx", dtype="M8[ns]") for rng, expected in [ (rng2, expected2), (rng3, expected3), (rng4, expected4), ]: result = base.intersection(rng) tm.assert_index_equal(result, expected) assert result.freq == expected.freq # non-monotonic base = DatetimeIndex( ["2011-01-05", "2011-01-04", "2011-01-02", "2011-01-03"], tz=tz, name="idx" ).as_unit("ns") rng2 = DatetimeIndex( ["2011-01-04", "2011-01-02", "2011-02-02", "2011-02-03"], tz=tz, name="idx" ).as_unit("ns") expected2 = DatetimeIndex( ["2011-01-04", "2011-01-02"], tz=tz, name="idx" ).as_unit("ns") rng3 = DatetimeIndex( ["2011-01-04", "2011-01-02", "2011-02-02", "2011-02-03"], tz=tz, name="other", ).as_unit("ns") expected3 = DatetimeIndex( ["2011-01-04", "2011-01-02"], tz=tz, name=None ).as_unit("ns") # GH 7880 rng4 = date_range( "7/1/2000", "7/31/2000", freq="D", tz=tz, unit="ns", name="idx" ) expected4 = DatetimeIndex([], tz=tz, name="idx").as_unit("ns") assert expected4.freq is None for rng, expected in [ (rng2, expected2), (rng3, expected3), (rng4, expected4), ]: result = base.intersection(rng, sort=sort) if sort is None: expected = expected.sort_values() 
tm.assert_index_equal(result, expected) assert result.freq == expected.freq # parametrize over both anchored and non-anchored freqs, as they # have different code paths @pytest.mark.parametrize("freq", ["min", "B"]) def test_intersection_empty(self, tz_aware_fixture, freq): # empty same freq GH2129 tz = tz_aware_fixture rng = date_range("6/1/2000", "6/15/2000", freq=freq, tz=tz) result = rng[0:0].intersection(rng) assert len(result) == 0 assert result.freq == rng.freq result = rng.intersection(rng[0:0]) assert len(result) == 0 assert result.freq == rng.freq # no overlap GH#33604 check_freq = freq != "min" # We don't preserve freq on non-anchored offsets result = rng[:3].intersection(rng[-3:]) tm.assert_index_equal(result, rng[:0]) if check_freq: # We don't preserve freq on non-anchored offsets assert result.freq == rng.freq # swapped left and right result = rng[-3:].intersection(rng[:3]) tm.assert_index_equal(result, rng[:0]) if check_freq: # We don't preserve freq on non-anchored offsets assert result.freq == rng.freq def test_intersection_bug_1708(self): from pandas import DateOffset index_1 = date_range("1/1/2012", periods=4, freq="12h") index_2 = index_1 + DateOffset(hours=1) result = index_1.intersection(index_2) assert len(result) == 0 @pytest.mark.parametrize("tz", tz) def test_difference(self, tz, sort): rng_dates = ["1/2/2000", "1/3/2000", "1/1/2000", "1/4/2000", "1/5/2000"] rng1 = DatetimeIndex(rng_dates, tz=tz) other1 = date_range("1/6/2000", freq="D", periods=5, tz=tz) expected1 = DatetimeIndex(rng_dates, tz=tz) rng2 = DatetimeIndex(rng_dates, tz=tz) other2 = date_range("1/4/2000", freq="D", periods=5, tz=tz) expected2 = DatetimeIndex(rng_dates[:3], tz=tz) rng3 = DatetimeIndex(rng_dates, tz=tz) other3 = DatetimeIndex([], tz=tz) expected3 = DatetimeIndex(rng_dates, tz=tz) for rng, other, expected in [ (rng1, other1, expected1), (rng2, other2, expected2), (rng3, other3, expected3), ]: result_diff = rng.difference(other, sort) if sort is None and 
len(other): # We dont sort (yet?) when empty GH#24959 expected = expected.sort_values() tm.assert_index_equal(result_diff, expected) def test_difference_freq(self, sort): # GH14323: difference of DatetimeIndex should not preserve frequency index = date_range("20160920", "20160925", freq="D", unit="ns") other = date_range("20160921", "20160924", freq="D", unit="ns") expected = DatetimeIndex(["20160920", "20160925"], dtype="M8[ns]", freq=None) idx_diff = index.difference(other, sort) tm.assert_index_equal(idx_diff, expected) tm.assert_attr_equal("freq", idx_diff, expected) # preserve frequency when the difference is a contiguous # subset of the original range other = date_range("20160922", "20160925", freq="D", unit="ns") idx_diff = index.difference(other, sort) expected = DatetimeIndex(["20160920", "20160921"], dtype="M8[ns]", freq="D") tm.assert_index_equal(idx_diff, expected) tm.assert_attr_equal("freq", idx_diff, expected) def test_datetimeindex_diff(self, sort): dti1 = date_range(freq="QE-JAN", start=datetime(1997, 12, 31), periods=100) dti2 = date_range(freq="QE-JAN", start=datetime(1997, 12, 31), periods=98) assert len(dti1.difference(dti2, sort)) == 2 @pytest.mark.parametrize("tz", [None, "Asia/Tokyo", "US/Eastern"]) def test_setops_preserve_freq(self, tz): rng = date_range("1/1/2000", "1/1/2002", name="idx", tz=tz) result = rng[:50].union(rng[50:100]) assert result.name == rng.name assert result.freq == rng.freq assert result.tz == rng.tz result = rng[:50].union(rng[30:100]) assert result.name == rng.name assert result.freq == rng.freq assert result.tz == rng.tz result = rng[:50].union(rng[60:100]) assert result.name == rng.name assert result.freq is None assert result.tz == rng.tz result = rng[:50].intersection(rng[25:75]) assert result.name == rng.name assert result.freqstr == "D" assert result.tz == rng.tz nofreq = DatetimeIndex(list(rng[25:75]), name="other") result = rng[:50].union(nofreq) assert result.name is None assert result.freq == rng.freq assert 
result.tz == rng.tz result = rng[:50].intersection(nofreq) assert result.name is None assert result.freq == rng.freq assert result.tz == rng.tz def test_intersection_non_tick_no_fastpath(self): # GH#42104 dti = DatetimeIndex( [ "2018-12-31", "2019-03-31", "2019-06-30", "2019-09-30", "2019-12-31", "2020-03-31", ], freq="QE-DEC", ) result = dti[::2].intersection(dti[1::2]) expected = dti[:0] tm.assert_index_equal(result, expected) def test_dti_intersection(self): rng = date_range("1/1/2011", periods=100, freq="h", tz="utc") left = rng[10:90][::-1] right = rng[20:80][::-1] assert left.tz == rng.tz result = left.intersection(right) assert result.tz == left.tz # Note: not difference, as there is no symmetry requirement there @pytest.mark.parametrize("setop", ["union", "intersection", "symmetric_difference"]) def test_dti_setop_aware(self, setop): # non-overlapping # GH#39328 as of 2.0 we cast these to UTC instead of object rng = date_range("2012-11-15 00:00:00", periods=6, freq="h", tz="US/Central") rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="h", tz="US/Eastern") result = getattr(rng, setop)(rng2) left = rng.tz_convert("UTC") right = rng2.tz_convert("UTC") expected = getattr(left, setop)(right) tm.assert_index_equal(result, expected) assert result.tz == left.tz if len(result): assert result[0].tz is timezone.utc assert result[-1].tz is timezone.utc def test_dti_union_mixed(self): # GH#21671 rng = DatetimeIndex([Timestamp("2011-01-01"), pd.NaT]) rng2 = DatetimeIndex(["2012-01-01", "2012-01-02"], tz="Asia/Tokyo") result = rng.union(rng2) expected = Index( [ Timestamp("2011-01-01"), pd.NaT, Timestamp("2012-01-01", tz="Asia/Tokyo"), Timestamp("2012-01-02", tz="Asia/Tokyo"), ], dtype=object, ) tm.assert_index_equal(result, expected)
TestDatetimeIndexSetOps
python
tiangolo__fastapi
docs_src/response_model/tutorial004_py310.py
{ "start": 78, "end": 595 }
class Item(BaseModel):
    """Response/request schema for an item.

    ``description`` is optional; ``tax`` and ``tags`` have defaults so that
    ``response_model_exclude_unset`` can drop them when the stored record
    never set them.
    """

    name: str
    description: str | None = None
    price: float
    tax: float = 10.5
    tags: list[str] = []


# In-memory sample data keyed by item id. Note that "foo" omits optional
# fields entirely, which is what the exclude_unset demo relies on.
items = {
    "foo": {"name": "Foo", "price": 50.2},
    "bar": {"name": "Bar", "description": "The bartenders", "price": 62, "tax": 20.2},
    "baz": {"name": "Baz", "description": None, "price": 50.2, "tax": 10.5, "tags": []},
}


@app.get("/items/{item_id}", response_model=Item, response_model_exclude_unset=True)
async def read_item(item_id: str):
    # response_model_exclude_unset=True serializes only the fields that were
    # explicitly present in the returned dict (defaults are omitted).
    return items[item_id]
Item
python
kamyu104__LeetCode-Solutions
Python/total-characters-in-string-after-transformations-ii.py
{ "start": 94, "end": 1161 }
class Solution(object):
    def lengthAfterTransformations(self, s, t, nums):
        """
        Return the length of s after t transformations, modulo 1e9+7.

        Character i expands into the next nums[i] letters (cyclically), so
        length evolution is linear: a 26x26 0/1 transition matrix raised to
        the t-th power, applied to the initial letter-count vector.

        Time: O(26^3 * log t).  Space: O(26^2).

        :type s: str
        :type t: int
        :type nums: List[int]
        :rtype: int
        """
        MOD = 10**9 + 7

        def matrix_mult(A, B):
            # Materialize B's columns once: on Python 3, zip(*B) is a
            # one-shot iterator and would be exhausted after the first row.
            cols = list(zip(*B))
            return [[sum(a * b % MOD for a, b in zip(row, col)) % MOD
                     for col in cols]
                    for row in A]

        def matrix_expo(A, K):
            # Square-and-multiply; result starts as the identity matrix.
            result = [[int(i == j) for j in range(len(A))]
                      for i in range(len(A))]
            while K:
                if K % 2:
                    result = matrix_mult(result, A)
                A = matrix_mult(A, A)
                K //= 2  # floor division keeps K an int on Python 3
            return result

        cnt = [0] * 26
        for x in s:
            cnt[ord(x) - ord('a')] += 1
        # matrix[i][j] == 1 iff letter i produces letter j in one step.
        matrix = [[0] * 26 for _ in range(26)]
        for i in range(len(nums)):
            for j in range(1, nums[i] + 1):
                matrix[i][(i + j) % 26] = 1
        matrix_pow_t = matrix_expo(matrix, t)
        row = matrix_mult([cnt], matrix_pow_t)[0]
        total = 0
        for v in row:
            total = (total + v) % MOD
        return total
Solution
python
sanic-org__sanic
sanic/middleware.py
{ "start": 331, "end": 3027 }
class Middleware:
    """Middleware object that is used to encapsulate middleware functions.

    This should generally not be instantiated directly, but rather through
    the `sanic.Sanic.middleware` decorator and its variants.

    Args:
        func (MiddlewareType): The middleware function to be called.
        location (MiddlewareLocation): The location of the middleware.
        priority (int): The priority of the middleware.
    """

    _counter = count()
    count: int

    __slots__ = ("func", "priority", "location", "definition")

    def __init__(
        self,
        func: MiddlewareType,
        location: MiddlewareLocation,
        priority: int = 0,
    ) -> None:
        self.func = func
        self.priority = priority
        self.location = location
        # Monotonic registration index, used to break priority ties.
        self.definition = next(Middleware._counter)

    def __call__(self, *args, **kwargs):
        return self.func(*args, **kwargs)

    def __hash__(self) -> int:
        # Hash delegates to the wrapped function so that re-wrapping the
        # same callable hashes identically.
        return hash(self.func)

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}("
            f"func=<function {self.func.__name__}>, "
            f"priority={self.priority}, "
            f"location={self.location.name})"
        )

    @property
    def order(self) -> tuple[int, int]:
        """Return a tuple of the priority and definition order.

        This is used to sort the middleware.

        Returns:
            tuple[int, int]: The priority and definition order.
        """
        # The definition index is negated so that, for equal priority,
        # earlier-registered middleware sorts ahead when ordered descending.
        return (self.priority, -self.definition)

    @classmethod
    def convert(
        cls,
        *middleware_collections: Sequence[Union[Middleware, MiddlewareType]],
        location: MiddlewareLocation,
    ) -> Deque[Middleware]:
        """Convert middleware collections to a deque of Middleware objects.

        Args:
            *middleware_collections (Sequence[Union[Middleware, MiddlewareType]]): The middleware collections to convert.
            location (MiddlewareLocation): The location of the middleware.

        Returns:
            Deque[Middleware]: The converted middleware.
        """  # noqa: E501
        return deque(
            [
                middleware
                if isinstance(middleware, Middleware)
                else Middleware(middleware, location)
                for collection in middleware_collections
                for middleware in collection
            ]
        )

    @classmethod
    def reset_count(cls) -> None:
        """Reset the counter for the middleware definition order.

        This is used for testing.

        Returns:
            None
        """
        cls._counter = count()
        cls.count = next(cls._counter)
Middleware
python
run-llama__llama_index
llama-index-integrations/llms/llama-index-llms-azure-inference/llama_index/llms/azure_inference/base.py
{ "start": 4145, "end": 20116 }
class AzureAICompletionsModel(FunctionCallingLLM):
    """
    Azure AI model inference for LLM.

    Examples:
        ```python
        from llama_index.core import Settings
        from llama_index.core.llms import ChatMessage
        from llama_index.llms.azure_inference import AzureAICompletionsModel

        llm = AzureAICompletionsModel(
            endpoint="https://[your-endpoint].inference.ai.azure.com",
            credential="your-api-key",
            temperature=0
        )

        # If using Microsoft Entra ID authentication, you can create the
        # client as follows:
        #
        # from azure.identity import DefaultAzureCredential
        #
        # llm = AzureAICompletionsModel(
        #     endpoint="https://[your-endpoint].inference.ai.azure.com",
        #     credential=DefaultAzureCredential()
        # )
        #
        # # If you plan to use asynchronous calling, make sure to use the async
        # # credentials as follows:
        #
        # from azure.identity.aio import DefaultAzureCredential as DefaultAzureCredentialAsync
        #
        # llm = AzureAICompletionsModel(
        #     endpoint="https://[your-endpoint].inference.ai.azure.com",
        #     credential=DefaultAzureCredentialAsync()
        # )

        resp = llm.chat(
            messages=ChatMessage(role="user", content="Who is Paul Graham?")
        )
        print(resp)

        # Once the client is instantiated, you can set the context to use the model
        Settings.llm = llm
        ```
    """

    model_config = ConfigDict(protected_namespaces=())

    model_name: Optional[str] = Field(
        default=None,
        description="The model id to use. Optional for endpoints running a single model.",
    )
    temperature: float = Field(
        default=DEFAULT_TEMPERATURE,
        description="The temperature to use for sampling.",
        ge=0.0,
        le=1.0,
    )
    max_tokens: Optional[int] = Field(
        default=None,
        description="The maximum number of tokens to generate.",
        gt=0,
    )
    # NOTE(review): typed ``str`` but defaults to None and conceptually holds
    # an integer seed — confirm the intended type before tightening.
    seed: str = Field(default=None, description="The random seed to use for sampling.")
    model_kwargs: Dict[str, Any] = Field(
        default_factory=dict,
        description="Additional kwargs model parameters.",
    )

    _client: ChatCompletionsClient = PrivateAttr()
    _async_client: ChatCompletionsClientAsync = PrivateAttr()
    # Lazily resolved from the endpoint's model-info call (see ``metadata``).
    _model_name: str = PrivateAttr(None)
    _model_type: str = PrivateAttr(None)
    _model_provider: str = PrivateAttr(None)

    def __init__(
        self,
        endpoint: str = None,
        credential: Union[str, AzureKeyCredential, "TokenCredential"] = None,
        temperature: float = DEFAULT_TEMPERATURE,
        max_tokens: Optional[int] = None,
        model_name: Optional[str] = None,
        api_version: Optional[str] = None,
        callback_manager: Optional[CallbackManager] = None,
        system_prompt: Optional[str] = None,
        messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
        completion_to_prompt: Optional[Callable[[str], str]] = None,
        pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
        output_parser: Optional[BaseOutputParser] = None,
        client_kwargs: Dict[str, Any] = None,
        **kwargs: Dict[str, Any],
    ) -> None:
        client_kwargs = client_kwargs or {}
        callback_manager = callback_manager or CallbackManager([])

        # Endpoint/credential may come from parameters or the environment.
        endpoint = get_from_param_or_env(
            "endpoint", endpoint, "AZURE_INFERENCE_ENDPOINT", None
        )
        credential = get_from_param_or_env(
            "credential", credential, "AZURE_INFERENCE_CREDENTIAL", None
        )
        credential = (
            AzureKeyCredential(credential)
            if isinstance(credential, str)
            else credential
        )

        if not endpoint:
            raise ValueError(
                "You must provide an endpoint to use the Azure AI model inference LLM."
                "Pass the endpoint as a parameter or set the AZURE_INFERENCE_ENDPOINT"
                "environment variable."
            )

        if not credential:
            raise ValueError(
                "You must provide an credential to use the Azure AI model inference LLM."
                "Pass the credential as a parameter or set the AZURE_INFERENCE_CREDENTIAL"
            )

        if api_version:
            client_kwargs["api_version"] = api_version

        super().__init__(
            model_name=model_name,
            temperature=temperature,
            max_tokens=max_tokens,
            callback_manager=callback_manager,
            system_prompt=system_prompt,
            messages_to_prompt=messages_to_prompt,
            completion_to_prompt=completion_to_prompt,
            pydantic_program_mode=pydantic_program_mode,
            output_parser=output_parser,
            **kwargs,
        )

        self._client = ChatCompletionsClient(
            endpoint=endpoint,
            credential=credential,
            user_agent="llamaindex",
            **client_kwargs,
        )

        self._async_client = ChatCompletionsClientAsync(
            endpoint=endpoint,
            credential=credential,
            user_agent="llamaindex",
            **client_kwargs,
        )

    @classmethod
    def class_name(cls) -> str:
        return "AzureAICompletionsModel"

    @property
    def metadata(self) -> LLMMetadata:
        """Return LLM metadata, resolving model info from the endpoint once."""
        if not self._model_name:
            model_info = None
            try:
                # Get model info from the endpoint. This method may not be supported by all
                # endpoints.
                model_info = self._client.get_model_info()
            except HttpResponseError:
                logger.warning(
                    f"Endpoint '{self._client._config.endpoint}' does not support model metadata retrieval. "
                    "Failed to get model info for method `metadata()`."
                )
            finally:
                if model_info:
                    self._model_name = model_info.get("model_name", None)
                    self._model_type = model_info.get("model_type", None)
                    self._model_provider = model_info.get("model_provider_name", None)
                else:
                    # Fall back to user-supplied name (or "unknown") when the
                    # endpoint cannot describe itself.
                    self._model_name = self.model_name or "unknown"
                    self._model_type = "unknown"
                    self._model_provider = "unknown"

        return LLMMetadata(
            is_chat_model=self._model_type == "chat-completions",
            model_name=self._model_name,
            model_type=self._model_type,
            model_provider=self._model_provider,
        )

    @property
    def _model_kwargs(self) -> Dict[str, Any]:
        base_kwargs = {
            "temperature": self.temperature,
            "max_tokens": self.max_tokens,
        }
        if self.model_name:
            base_kwargs["model"] = self.model_name
        return {
            **base_kwargs,
            **self.model_kwargs,
        }

    def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
        # Per-call kwargs override the model-level defaults.
        return {
            **self._model_kwargs,
            **kwargs,
        }

    @llm_chat_callback()
    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        messages = to_inference_message(messages)
        all_kwargs = self._get_all_kwargs(**kwargs)

        response = self._client.complete(messages=messages, **all_kwargs)
        response_message = from_inference_message(response.choices[0].message)

        return ChatResponse(
            message=response_message,
            raw=response.as_dict(),
        )

    def _to_azure_tool_choice(
        self, tool_required: bool
    ) -> Optional[
        Union[str, ChatCompletionsToolChoicePreset, ChatCompletionsNamedToolChoice]
    ]:
        # Map the llama-index boolean onto Azure's tool-choice presets.
        if tool_required:
            return ChatCompletionsToolChoicePreset.REQUIRED
        else:
            return ChatCompletionsToolChoicePreset.AUTO

    @llm_completion_callback()
    def complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        complete_fn = chat_to_completion_decorator(self.chat)
        return complete_fn(prompt, **kwargs)

    @llm_chat_callback()
    def stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        messages = to_inference_message(messages)
        all_kwargs = self._get_all_kwargs(**kwargs)

        response = self._client.complete(messages=messages, stream=True, **all_kwargs)

        def gen() -> ChatResponseGen:
            content = ""
            role = MessageRole.ASSISTANT
            for chunk in response:
                content_delta = (
                    chunk.choices[0].delta.content if len(chunk.choices) > 0 else None
                )
                if content_delta is None:
                    continue
                content += content_delta
                yield ChatResponse(
                    message=ChatMessage(role=role, content=content),
                    delta=content_delta,
                    raw=chunk,
                )

        return gen()

    @llm_completion_callback()
    def stream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseGen:
        stream_complete_fn = stream_chat_to_completion_decorator(self.stream_chat)
        return stream_complete_fn(prompt, **kwargs)

    @llm_chat_callback()
    async def achat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponse:
        messages = to_inference_message(messages)
        all_kwargs = self._get_all_kwargs(**kwargs)

        response = await self._async_client.complete(messages=messages, **all_kwargs)
        response_message = from_inference_message(response.choices[0].message)

        return ChatResponse(
            message=response_message,
            raw=response.as_dict(),
        )

    @llm_completion_callback()
    async def acomplete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        acomplete_fn = achat_to_completion_decorator(self.achat)
        return await acomplete_fn(prompt, **kwargs)

    @llm_chat_callback()
    async def astream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseAsyncGen:
        messages = to_inference_message(messages)
        all_kwargs = self._get_all_kwargs(**kwargs)

        response = await self._async_client.complete(
            messages=messages, stream=True, **all_kwargs
        )

        async def gen() -> ChatResponseAsyncGen:
            content = ""
            role = MessageRole.ASSISTANT
            async for chunk in response:
                content_delta = (
                    chunk.choices[0].delta.content if chunk.choices else None
                )
                if content_delta is None:
                    continue
                content += content_delta
                yield ChatResponse(
                    message=ChatMessage(role=role, content=content),
                    delta=content_delta,
                    raw=chunk,
                )

        return gen()

    @llm_completion_callback()
    async def astream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseAsyncGen:
        astream_complete_fn = astream_chat_to_completion_decorator(self.astream_chat)
        # FIX: do not forward stream=True here. astream_chat already passes
        # stream=True to the client, and forwarding it again through kwargs
        # raised "complete() got multiple values for keyword argument
        # 'stream'" at runtime. The sync stream_complete never forwarded it.
        return await astream_complete_fn(prompt, **kwargs)

    def chat_with_tools(
        self,
        tools: List["BaseTool"],
        user_msg: Optional[Union[str, ChatMessage]] = None,
        chat_history: Optional[List[ChatMessage]] = None,
        verbose: bool = False,
        allow_parallel_tool_calls: bool = False,
        tool_required: bool = False,
        **kwargs: Any,
    ) -> ChatResponse:
        """Predict and call the tool."""
        # Azure AI model inference uses the same openai tool format
        tool_specs = [
            tool.metadata.to_openai_tool(skip_length_check=True) for tool in tools
        ]

        if isinstance(user_msg, str):
            user_msg = ChatMessage(role=MessageRole.USER, content=user_msg)

        messages = chat_history or []
        if user_msg:
            messages.append(user_msg)

        response = self.chat(
            messages,
            tools=tool_specs,
            tool_choice=self._to_azure_tool_choice(tool_required),
            **kwargs,
        )
        if not allow_parallel_tool_calls:
            force_single_tool_call(response)
        return response

    async def achat_with_tools(
        self,
        tools: List["BaseTool"],
        user_msg: Optional[Union[str, ChatMessage]] = None,
        chat_history: Optional[List[ChatMessage]] = None,
        verbose: bool = False,
        allow_parallel_tool_calls: bool = False,
        tool_required: bool = False,
        **kwargs: Any,
    ) -> ChatResponse:
        """Predict and call the tool."""
        # Azure AI model inference uses the same openai tool format
        tool_specs = [
            tool.metadata.to_openai_tool(skip_length_check=True) for tool in tools
        ]

        if isinstance(user_msg, str):
            user_msg = ChatMessage(role=MessageRole.USER, content=user_msg)

        messages = chat_history or []
        if user_msg:
            messages.append(user_msg)

        response = await self.achat(
            messages,
            tools=tool_specs,
            tool_choice=self._to_azure_tool_choice(tool_required),
            **kwargs,
        )
        if not allow_parallel_tool_calls:
            force_single_tool_call(response)
        return response

    def get_tool_calls_from_response(
        self,
        response: "AgentChatResponse",
        error_on_no_tool_call: bool = True,
    ) -> List[ToolSelection]:
        """Predict and call the tool."""
        tool_calls = response.message.additional_kwargs.get("tool_calls", [])

        if len(tool_calls) < 1:
            if error_on_no_tool_call:
                raise ValueError(
                    f"Expected at least one tool call, but got {len(tool_calls)} tool calls."
                )
            else:
                return []

        tool_selections = []
        for tool_call in tool_calls:
            if not isinstance(tool_call, ChatCompletionsToolCall):
                raise ValueError("Invalid tool_call object")
            if tool_call.type != "function":
                raise ValueError(
                    "Invalid tool type. Only `function` is supported but `{tool_call.type}` was received."
                )
            argument_dict = json.loads(tool_call.function.arguments)

            tool_selections.append(
                ToolSelection(
                    tool_id=tool_call.id,
                    tool_name=tool_call.function.name,
                    tool_kwargs=argument_dict,
                )
            )

        return tool_selections

    def _prepare_chat_with_tools(
        self,
        tools: List["BaseTool"],
        user_msg: Optional[Union[str, ChatMessage]] = None,
        chat_history: Optional[List[ChatMessage]] = None,
        verbose: bool = False,
        allow_parallel_tool_calls: bool = False,
        tool_required: bool = False,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        """Prepare the arguments needed to let the LLM chat with tools."""
        chat_history = chat_history or []

        if isinstance(user_msg, str):
            user_msg = ChatMessage(role=MessageRole.USER, content=user_msg)
            chat_history.append(user_msg)

        tool_dicts = [to_inference_tool(tool.metadata) for tool in tools]

        return {
            "messages": chat_history,
            "tools": tool_dicts or None,
            "tool_choice": self._to_azure_tool_choice(tool_required),
            **kwargs,
        }
AzureAICompletionsModel
python
neetcode-gh__leetcode
python/0104-maximum-depth-of-binary-tree.py
{ "start": 216, "end": 577 }
class Solution:
    # NOTE: the annotation is a string forward reference because TreeNode is
    # supplied by the judge (it is not defined in this file), so evaluating
    # it eagerly would raise NameError on import.
    def maxDepth(self, root: "TreeNode") -> int:
        """Return the maximum depth of the tree, iteratively (DFS).

        Each stack entry pairs a node with the depth it sits at; None
        children are pushed and simply skipped on pop, keeping the loop
        body branch-light. Depth of an empty tree is 0.
        """
        stack = [[root, 1]]
        res = 0

        while stack:
            node, depth = stack.pop()
            if node:
                res = max(res, depth)
                stack.append([node.left, depth + 1])
                stack.append([node.right, depth + 1])
        return res
# BFS
Solution
python
prabhupant__python-ds
data_structures/binary_trees/spiral_tree.py
{ "start": 59, "end": 1144 }
class Node:
    """A binary-tree node holding a value and two child links."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None


def height(root):
    """Return the number of levels in the tree rooted at root (0 if empty)."""
    if not root:
        return 0
    lheight = height(root.left)
    rheight = height(root.right)
    return 1 + max(lheight, rheight)


def print_spiral(root):
    """Print the tree level by level, alternating direction each level."""
    h = height(root)
    left_to_right = False
    for i in range(1, h + 1):
        print_level(root, i, left_to_right)
        # Flip direction for the next level to produce the spiral order.
        left_to_right = not left_to_right


def print_level(root, level, left_to_right):
    """Print the nodes of one level, honoring the traversal direction."""
    if not root:
        return
    if level == 1:
        print(root.val, end=' ')
    elif level > 1:
        if left_to_right:
            print_level(root.left, level - 1, left_to_right)
            print_level(root.right, level - 1, left_to_right)
        else:
            print_level(root.right, level - 1, left_to_right)
            print_level(root.left, level - 1, left_to_right)


if __name__ == '__main__':
    # Demo tree:
    #         1
    #       /   \
    #      2     3
    #     / \   / \
    #    7   6 5   4
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    root.left.left = Node(7)
    root.left.right = Node(6)
    root.right.left = Node(5)
    root.right.right = Node(4)

    print_spiral(root)
Node
python
python-pillow__Pillow
src/PIL/PngImagePlugin.py
{ "start": 4679, "end": 7611 }
class ChunkStream:
    """Sequential reader for PNG chunks with CRC checking and push-back."""

    def __init__(self, fp: IO[bytes]) -> None:
        self.fp: IO[bytes] | None = fp
        # Pushed-back (cid, pos, length) headers, consumed LIFO by read().
        self.queue: list[tuple[bytes, int, int]] | None = []

    def read(self) -> tuple[bytes, int, int]:
        """Fetch a new chunk. Returns header information."""
        cid = None

        assert self.fp is not None
        if self.queue:
            # Serve a pushed-back header and reposition the stream at its data.
            cid, pos, length = self.queue.pop()
            self.fp.seek(pos)
        else:
            s = self.fp.read(8)
            cid = s[4:]
            pos = self.fp.tell()
            length = i32(s)

        if not is_cid(cid):
            if not ImageFile.LOAD_TRUNCATED_IMAGES:
                msg = f"broken PNG file (chunk {repr(cid)})"
                raise SyntaxError(msg)

        return cid, pos, length

    def __enter__(self) -> ChunkStream:
        return self

    def __exit__(self, *args: object) -> None:
        self.close()

    def close(self) -> None:
        self.queue = self.fp = None

    def push(self, cid: bytes, pos: int, length: int) -> None:
        # Queue a chunk header so the next read() returns it again.
        assert self.queue is not None
        self.queue.append((cid, pos, length))

    def call(self, cid: bytes, pos: int, length: int) -> bytes:
        """Call the appropriate chunk handler"""
        logger.debug("STREAM %r %s %s", cid, pos, length)
        return getattr(self, f"chunk_{cid.decode('ascii')}")(pos, length)

    def crc(self, cid: bytes, data: bytes) -> None:
        """Read and verify checksum"""
        # Skip CRC checks for ancillary chunks if allowed to load truncated
        # images
        # 5th byte of first char is 1 [specs, section 5.4]
        if ImageFile.LOAD_TRUNCATED_IMAGES and (cid[0] >> 5 & 1):
            self.crc_skip(cid, data)
            return

        assert self.fp is not None
        try:
            crc1 = _crc32(data, _crc32(cid))
            crc2 = i32(self.fp.read(4))
            if crc1 != crc2:
                msg = f"broken PNG file (bad header checksum in {repr(cid)})"
                raise SyntaxError(msg)
        except struct.error as e:
            msg = f"broken PNG file (incomplete checksum in {repr(cid)})"
            raise SyntaxError(msg) from e

    def crc_skip(self, cid: bytes, data: bytes) -> None:
        """Read checksum"""
        assert self.fp is not None
        self.fp.read(4)

    def verify(self, endchunk: bytes = b"IEND") -> list[bytes]:
        # Simple approach; just calculate checksum for all remaining
        # blocks.  Must be called directly after open.

        cids = []

        assert self.fp is not None
        while True:
            try:
                cid, pos, length = self.read()
            except struct.error as e:
                msg = "truncated PNG file"
                raise OSError(msg) from e

            if cid == endchunk:
                break
            self.crc(cid, ImageFile._safe_read(self.fp, length))
            cids.append(cid)

        return cids
ChunkStream
python
allegroai__clearml
clearml/backend_api/services/v2_20/tasks.py
{ "start": 423087, "end": 444453 }
class ____(Request): """ Validate task properties (before create) :param name: Task name. Unique within the company. :type name: str :param tags: User-defined tags list :type tags: Sequence[str] :param system_tags: System tags list. This field is reserved for system use, please don't use it. :type system_tags: Sequence[str] :param type: Type of task :type type: TaskTypeEnum :param comment: Free text comment :type comment: str :param parent: Parent task id Must be a completed task. :type parent: str :param project: Project ID of the project to which this task is assigned Must exist[ab] :type project: str :param output_dest: Output storage id Must be a reference to an existing storage. :type output_dest: str :param execution: Task execution params :type execution: Execution :param hyperparams: Task hyper params per section :type hyperparams: dict :param configuration: Task configuration params :type configuration: dict :param script: Script info :type script: Script :param models: Task models :type models: TaskModels :param container: Docker container parameters :type container: dict """ _service = "tasks" _action = "validate" _version = "2.20" _schema = { "definitions": { "artifact": { "properties": { "content_size": { "description": "Raw data length in bytes", "type": "integer", }, "display_data": { "description": "User-defined list of key/value pairs, sorted", "items": {"items": {"type": "string"}, "type": "array"}, "type": "array", }, "hash": { "description": "Hash of entire raw data", "type": "string", }, "key": {"description": "Entry key", "type": "string"}, "mode": { "$ref": "#/definitions/artifact_mode_enum", "description": "System defined input/output indication", }, "timestamp": { "description": "Epoch time when artifact was created", "type": "integer", }, "type": {"description": "System defined type", "type": "string"}, "type_data": { "$ref": "#/definitions/artifact_type_data", "description": "Additional fields defined by the system", }, "uri": 
{"description": "Raw data location", "type": "string"}, }, "required": ["key", "type"], "type": "object", }, "artifact_mode_enum": { "default": "output", "enum": ["input", "output"], "type": "string", }, "artifact_type_data": { "properties": { "content_type": { "description": "System defined raw data content type", "type": ["string", "null"], }, "data_hash": { "description": "Hash of raw data, without any headers or descriptive parts", "type": ["string", "null"], }, "preview": { "description": "Description or textual data", "type": ["string", "null"], }, }, "type": "object", }, "configuration_item": { "properties": { "description": { "description": "The parameter description. Optional", "type": ["string", "null"], }, "name": { "description": "Name of the parameter. Should be unique", "type": ["string", "null"], }, "type": { "description": "Type of the parameter. Optional", "type": ["string", "null"], }, "value": { "description": "Value of the parameter", "type": ["string", "null"], }, }, "type": "object", }, "execution": { "properties": { "artifacts": { "description": "Task artifacts", "items": {"$ref": "#/definitions/artifact"}, "type": ["array", "null"], }, "framework": { "description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. 
", "type": ["string", "null"], }, "model_desc": { "additionalProperties": True, "description": "Json object representing the Model descriptors", "type": ["object", "null"], }, "model_labels": { "additionalProperties": {"type": "integer"}, "description": "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks", "type": ["object", "null"], }, "parameters": { "additionalProperties": True, "description": "Json object containing the Task parameters", "type": ["object", "null"], }, "queue": { "description": "Queue ID where task was queued.", "type": ["string", "null"], }, }, "type": "object", }, "params_item": { "properties": { "description": { "description": "The parameter description. Optional", "type": ["string", "null"], }, "name": { "description": "Name of the parameter. The combination of section and name should be unique", "type": ["string", "null"], }, "section": { "description": "Section that the parameter belongs to", "type": ["string", "null"], }, "type": { "description": "Type of the parameter. 
Optional", "type": ["string", "null"], }, "value": { "description": "Value of the parameter", "type": ["string", "null"], }, }, "type": "object", }, "script": { "properties": { "binary": { "default": "python", "description": "Binary to use when running the script", "type": ["string", "null"], }, "branch": { "description": "Repository branch id If not provided and tag not provided, default repository branch is used.", "type": ["string", "null"], }, "diff": { "description": "Uncommitted changes found in the repository when task was run", "type": ["string", "null"], }, "entry_point": { "description": "Path to execute within the repository", "type": ["string", "null"], }, "repository": { "description": "Name of the repository where the script is located", "type": ["string", "null"], }, "requirements": { "description": "A JSON object containing requirements strings by key", "type": ["object", "null"], }, "tag": { "description": "Repository tag", "type": ["string", "null"], }, "version_num": { "description": "Version (changeset) number. 
Optional (default is head version) Unused if tag is provided.", "type": ["string", "null"], }, "working_dir": { "description": "Path to the folder from which to run the script Default - root folder of repository", "type": ["string", "null"], }, }, "type": "object", }, "section_params": { "additionalProperties": {"$ref": "#/definitions/params_item"}, "description": "Task section params", "type": "object", }, "task_model_item": { "properties": { "model": {"description": "The model ID", "type": "string"}, "name": {"description": "The task model name", "type": "string"}, }, "required": ["name", "model"], "type": "object", }, "task_models": { "properties": { "input": { "description": "The list of task input models", "items": {"$ref": "#/definitions/task_model_item"}, "type": ["array", "null"], }, "output": { "description": "The list of task output models", "items": {"$ref": "#/definitions/task_model_item"}, "type": ["array", "null"], }, }, "type": "object", }, "task_type_enum": { "enum": [ "training", "testing", "inference", "data_processing", "application", "monitor", "controller", "optimizer", "service", "qc", "custom", ], "type": "string", }, }, "properties": { "comment": {"description": "Free text comment ", "type": "string"}, "configuration": { "additionalProperties": {"$ref": "#/definitions/configuration_item"}, "description": "Task configuration params", "type": "object", }, "container": { "type": "object", "description": "Docker container parameters", "additionalProperties": {"type": ["string", "null"]}, }, "execution": { "$ref": "#/definitions/execution", "description": "Task execution params", }, "hyperparams": { "additionalProperties": {"$ref": "#/definitions/section_params"}, "description": "Task hyper params per section", "type": "object", }, "models": { "$ref": "#/definitions/task_models", "description": "Task models", }, "name": { "description": "Task name. 
Unique within the company.", "type": "string", }, "output_dest": { "description": "Output storage id Must be a reference to an existing storage.", "type": "string", }, "parent": { "description": "Parent task id Must be a completed task.", "type": "string", }, "project": { "description": "Project ID of the project to which this task is assigned Must exist[ab]", "type": "string", }, "script": {"$ref": "#/definitions/script", "description": "Script info"}, "system_tags": { "description": "System tags list. This field is reserved for system use, please don't use it.", "items": {"type": "string"}, "type": "array", }, "tags": { "description": "User-defined tags list", "items": {"type": "string"}, "type": "array", }, "type": { "$ref": "#/definitions/task_type_enum", "description": "Type of task", }, }, "required": ["name", "type"], "type": "object", } def __init__( self, name: str, type: Any, tags: Optional[List[str]] = None, system_tags: Optional[List[str]] = None, comment: Optional[str] = None, parent: Optional[str] = None, project: Optional[str] = None, output_dest: Optional[str] = None, execution: Any = None, hyperparams: Optional[dict] = None, configuration: Optional[dict] = None, script: Any = None, models: Any = None, container: Optional[dict] = None, **kwargs: Any ) -> None: super(ValidateRequest, self).__init__(**kwargs) self.name = name self.tags = tags self.system_tags = system_tags self.type = type self.comment = comment self.parent = parent self.project = project self.output_dest = output_dest self.execution = execution self.hyperparams = hyperparams self.configuration = configuration self.script = script self.models = models self.container = container @schema_property("name") def name(self) -> str: return self._property_name @name.setter def name(self, value: str) -> None: if value is None: self._property_name = None return self.assert_isinstance(value, "name", six.string_types) self._property_name = value @schema_property("tags") def tags(self) -> 
Optional[List[str]]: return self._property_tags @tags.setter def tags(self, value: Optional[List[str]]) -> None: if value is None: self._property_tags = None return self.assert_isinstance(value, "tags", (list, tuple)) self.assert_isinstance(value, "tags", six.string_types, is_array=True) self._property_tags = value @schema_property("system_tags") def system_tags(self) -> Optional[List[str]]: return self._property_system_tags @system_tags.setter def system_tags(self, value: Optional[List[str]]) -> None: if value is None: self._property_system_tags = None return self.assert_isinstance(value, "system_tags", (list, tuple)) self.assert_isinstance(value, "system_tags", six.string_types, is_array=True) self._property_system_tags = value @schema_property("type") def type(self) -> Any: return self._property_type @type.setter def type(self, value: Any) -> None: if value is None: self._property_type = None return if isinstance(value, six.string_types): try: value = TaskTypeEnum(value) except ValueError: pass else: self.assert_isinstance(value, "type", enum.Enum) self._property_type = value @schema_property("comment") def comment(self) -> Optional[str]: return self._property_comment @comment.setter def comment(self, value: Optional[str]) -> None: if value is None: self._property_comment = None return self.assert_isinstance(value, "comment", six.string_types) self._property_comment = value @schema_property("parent") def parent(self) -> Optional[str]: return self._property_parent @parent.setter def parent(self, value: Optional[str]) -> None: if value is None: self._property_parent = None return self.assert_isinstance(value, "parent", six.string_types) self._property_parent = value @schema_property("project") def project(self) -> Optional[str]: return self._property_project @project.setter def project(self, value: Optional[str]) -> None: if value is None: self._property_project = None return self.assert_isinstance(value, "project", six.string_types) self._property_project = value 
@schema_property("output_dest") def output_dest(self) -> Optional[str]: return self._property_output_dest @output_dest.setter def output_dest(self, value: Optional[str]) -> None: if value is None: self._property_output_dest = None return self.assert_isinstance(value, "output_dest", six.string_types) self._property_output_dest = value @schema_property("execution") def execution(self) -> Any: return self._property_execution @execution.setter def execution(self, value: Any) -> None: if value is None: self._property_execution = None return if isinstance(value, dict): value = Execution.from_dict(value) else: self.assert_isinstance(value, "execution", Execution) self._property_execution = value @schema_property("hyperparams") def hyperparams(self) -> Optional[dict]: return self._property_hyperparams @hyperparams.setter def hyperparams(self, value: Optional[dict]) -> None: if value is None: self._property_hyperparams = None return self.assert_isinstance(value, "hyperparams", dict) self.assert_isinstance(value.keys(), "hyperparams_keys", six.string_types, is_array=True) self.assert_isinstance(value.values(), "hyperparams_values", (SectionParams, dict), is_array=True) value = dict(((k, SectionParams(**v) if isinstance(v, dict) else v) for (k, v) in value.items())) self._property_hyperparams = value @schema_property("configuration") def configuration(self) -> Optional[dict]: return self._property_configuration @configuration.setter def configuration(self, value: Optional[dict]) -> None: if value is None: self._property_configuration = None return self.assert_isinstance(value, "configuration", dict) self.assert_isinstance(value.keys(), "configuration_keys", six.string_types, is_array=True) self.assert_isinstance( value.values(), "configuration_values", (ConfigurationItem, dict), is_array=True, ) value = dict(((k, ConfigurationItem(**v) if isinstance(v, dict) else v) for (k, v) in value.items())) self._property_configuration = value @schema_property("script") def script(self) 
-> Any: return self._property_script @script.setter def script(self, value: Any) -> None: if value is None: self._property_script = None return if isinstance(value, dict): value = Script.from_dict(value) else: self.assert_isinstance(value, "script", Script) self._property_script = value @schema_property("models") def models(self) -> Any: return self._property_models @models.setter def models(self, value: Any) -> None: if value is None: self._property_models = None return if isinstance(value, dict): value = TaskModels.from_dict(value) else: self.assert_isinstance(value, "models", TaskModels) self._property_models = value @schema_property("container") def container(self) -> Optional[dict]: return self._property_container @container.setter def container(self, value: Optional[dict]) -> None: if value is None: self._property_container = None return self.assert_isinstance(value, "container", (dict,)) self._property_container = value
ValidateRequest
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_axis32.py
{ "start": 315, "end": 1399 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_axis32.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "area"}) chart.axis_ids = [96171520, 96173056] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] chart.set_x_axis({"position_axis": "between"}) worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) worksheet.write_column("C1", data[2]) chart.add_series({"values": "=Sheet1!$A$1:$A$5"}) chart.add_series({"values": "=Sheet1!$B$1:$B$5"}) chart.add_series({"values": "=Sheet1!$C$1:$C$5"}) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
pytorch__pytorch
torch/distributed/_shard/sharded_tensor/api.py
{ "start": 2339, "end": 6735 }
class ____(torch.Tensor): _sharding_spec: shard_spec.ShardingSpec _metadata: ShardedTensorMetadata _local_shards: list[Shard] def __new__(cls, sharding_spec: shard_spec.ShardingSpec, *size, **kwargs): # Use __new__ to construct a wrapper tensor, for recording tensor # properties and logging purposes. torch._C._log_api_usage_once("torch.distributed._shard.sharded_tensor") # check sharding spec and build sharded tensor metadata if not isinstance(sharding_spec, shard_spec.ShardingSpec): raise ValueError(f"Expecting ShardingSpec but got: {type(sharding_spec)}") sizes = _flatten_tensor_size(size) dtype = kwargs["dtype"] layout = kwargs["layout"] pin_memory = kwargs["pin_memory"] requires_grad = kwargs["requires_grad"] if dtype is None: dtype = torch.get_default_dtype() tensor_properties = TensorProperties( dtype, layout, requires_grad, pin_memory=pin_memory ) sharded_tensor_metadata = sharding_spec.build_metadata( sizes, tensor_properties=tensor_properties ) r = torch.Tensor._make_wrapper_subclass( cls, sizes, dtype=dtype, layout=layout, pin_memory=pin_memory, requires_grad=requires_grad, ) # set sharding spec r._sharding_spec = sharding_spec # set metadata r._metadata = sharded_tensor_metadata # set local shards r._local_shards = [] return r def metadata(self) -> ShardedTensorMetadata: """ Returns a :class:`ShardedTensorMetadata` object corresponding to the metadata for the entire tensor. """ return self._metadata def local_shards(self) -> list[Shard]: """ Returns a list of :class:`Shard' corresponding to the local shards for this rank. Returns an empty list if the current rank does not host any shards for this Tensor. """ return self._local_shards @classmethod def _init_from_local_shards_and_global_metadata( cls, local_shards: list[Shard], sharded_tensor_metadata: ShardedTensorMetadata, sharding_spec=None, ) -> ShardedTensorBase: """ Initialize a ShardedTensorBase with local shards and a global ShardedTensorMetadata built on each rank. 
Warning: This API is experimental and subject to change. It does not do cross rank validations, and fully rely on the user for the correctness of sharded_tensor_metadata on each rank """ shards_metadata = sharded_tensor_metadata.shards_metadata tensor_properties = sharded_tensor_metadata.tensor_properties if len(shards_metadata) == 0: raise ValueError("shards_metadata must not be empty!") if tensor_properties.layout != torch.strided: raise ValueError("Only torch.strided layout is currently supported") if sharding_spec is None: spec = shard_spec._infer_sharding_spec_from_shards_metadata(shards_metadata) else: spec = sharding_spec sharded_tensor_base = ShardedTensorBase.__new__( ShardedTensor, spec, sharded_tensor_metadata.size, dtype=tensor_properties.dtype, layout=tensor_properties.layout, pin_memory=tensor_properties.pin_memory, requires_grad=tensor_properties.requires_grad, ) # check if shards_metadata have overlap shards validate_non_overlapping_shards_metadata(shards_metadata) # check if the shards_metadata is compatible with overall size of the sharded tensor. check_tensor(shards_metadata, list(sharded_tensor_metadata.size)) # done validation, add local_shards sharded_tensor_base._local_shards = local_shards return sharded_tensor_base @classmethod def __torch_dispatch__(cls, func, types, args=(), kwargs=None): # type: ignore[override] raise RuntimeError( f"A {cls.__name__} object is being used from c++ while calling {func.__module__}.{func.__name__} " "but the there is no custom __torch_dispatch__ implementation for it." )
ShardedTensorBase
python
sqlalchemy__sqlalchemy
examples/generic_associations/table_per_related.py
{ "start": 1334, "end": 1844 }
class ____: """Define columns that will be present in each 'Address' table. This is a declarative mixin, so additional mapped attributes beyond simple columns specified here should be set up using @declared_attr. """ street: Mapped[str] city: Mapped[str] zip: Mapped[str] def __repr__(self): return "%s(street=%r, city=%r, zip=%r)" % ( self.__class__.__name__, self.street, self.city, self.zip, )
Address
python
django-haystack__django-haystack
test_haystack/test_fields.py
{ "start": 6724, "end": 8339 }
class ____(TestCase): def test_init(self): try: foo = NgramField(model_attr="foo") except: self.fail() self.assertRaises(SearchFieldError, NgramField, faceted=True) def test_prepare(self): mock = MockModel() mock.user = "daniel" author = NgramField(model_attr="user") self.assertEqual(author.prepare(mock), "daniel") # Do a lookup through the relation. mock_tag = MockTag.objects.create(name="primary") mock = MockModel() mock.tag = mock_tag tag_name = NgramField(model_attr="tag__name") self.assertEqual(tag_name.prepare(mock), "primary") # Use the default. mock = MockModel() author = NgramField(model_attr="author", default="") self.assertEqual(author.prepare(mock), "") # Simulate failed lookups. mock_tag = MockTag.objects.create(name="primary") mock = MockModel() mock.tag = mock_tag tag_slug = NgramField(model_attr="tag__slug") self.assertRaises(SearchFieldError, tag_slug.prepare, mock) # Simulate default='foo'. mock = MockModel() default = NgramField(default="foo") self.assertEqual(default.prepare(mock), "foo") # Simulate null=True. mock = MockModel() empty = NgramField(null=True) self.assertEqual(empty.prepare(mock), None) mock = MockModel() mock.user = None author = NgramField(model_attr="user", null=True) self.assertEqual(author.prepare(mock), None)
NgramFieldTestCase
python
wandb__wandb
wandb/vendor/pygments/lexers/sql.py
{ "start": 9483, "end": 12463 }
class ____(Lexer): """ Lexer for psql sessions. .. versionadded:: 1.5 """ name = 'PostgreSQL console (psql)' aliases = ['psql', 'postgresql-console', 'postgres-console'] mimetypes = ['text/x-postgresql-psql'] def get_tokens_unprocessed(self, data): sql = PsqlRegexLexer(**self.options) lines = lookahead(line_re.findall(data)) # prompt-output cycle while 1: # consume the lines of the command: start with an optional prompt # and continue until the end of command is detected curcode = '' insertions = [] while 1: try: line = next(lines) except StopIteration: # allow the emission of partially collected items # the repl loop will be broken below break # Identify a shell prompt in case of psql commandline example if line.startswith('$') and not curcode: lexer = get_lexer_by_name('console', **self.options) for x in lexer.get_tokens_unprocessed(line): yield x break # Identify a psql prompt mprompt = re_prompt.match(line) if mprompt is not None: insertions.append((len(curcode), [(0, Generic.Prompt, mprompt.group())])) curcode += line[len(mprompt.group()):] else: curcode += line # Check if this is the end of the command # TODO: better handle multiline comments at the end with # a lexer with an external state? if re_psql_command.match(curcode) \ or re_end_command.search(curcode): break # Emit the combined stream of command and prompt(s) for item in do_insertions(insertions, sql.get_tokens_unprocessed(curcode)): yield item # Emit the output lines out_token = Generic.Output while 1: line = next(lines) mprompt = re_prompt.match(line) if mprompt is not None: # push the line back to have it processed by the prompt lines.send(line) break mmsg = re_message.match(line) if mmsg is not None: if mmsg.group(1).startswith("ERROR") \ or mmsg.group(1).startswith("FATAL"): out_token = Generic.Error yield (mmsg.start(1), Generic.Strong, mmsg.group(1)) yield (mmsg.start(2), out_token, mmsg.group(2)) else: yield (0, out_token, line)
PostgresConsoleLexer
python
readthedocs__readthedocs.org
readthedocs/organizations/tests/test_orgs.py
{ "start": 6680, "end": 7513 }
class ____(OrganizationTestCase): def test_member_add_regression(self): """Test owner add from regression from previous functionality.""" self.assertEqual(self.organization.members.count(), 1) self.add_member(username="tester") self.assertEqual(self.organization.members.count(), 1) self.assertEqual(self.organization.owners.count(), 1) def test_member_delete_regression(self): """Test member delete from regression from previous functionality.""" self.test_member_add_regression() data = {"user": "tester"} resp = self.client.post( "/organizations/mozilla/members/delete/", data=data, ) self.assertEqual(resp.status_code, 404) self.assertEqual(self.organization.members.count(), 1)
OrganizationMemberTests
python
pydantic__pydantic
pydantic/v1/errors.py
{ "start": 6954, "end": 7047 }
class ____(PydanticTypeError): msg_template = 'value is not a valid sequence'
SequenceError
python
getsentry__sentry
tests/sentry/issues/endpoints/test_group_attachments.py
{ "start": 318, "end": 11149 }
class ____(APITestCase): def create_attachment( self, type: str | None = None, event_id: str | None = None, file_name: str = "hello.png", group_id: int | None = None, ) -> EventAttachment: if type is None: type = "event.attachment" self.attachment = EventAttachment.objects.create( event_id=event_id or self.event.event_id, project_id=self.event.project_id, group_id=group_id or self.group.id, type=type, name=file_name, blob_path=":File contents here", ) return self.attachment def path( self, types: Sequence[str] | None = None, event_ids: Sequence[str] | None = None, screenshot: bool = False, ) -> str: path = f"/api/0/issues/{self.group.id}/attachments/" query = [("types", t) for t in types or ()] query.extend([("event_id", id) for id in event_ids or ()]) if screenshot: query.append(("screenshot", "1")) if query: path += "?" + urlencode(query) return path def test_basic(self) -> None: self.login_as(user=self.user) attachment = self.create_attachment() with self.feature("organizations:event-attachments"): response = self.client.get(self.path()) assert response.status_code == 200, response.content assert len(response.data) == 1 assert response.data[0]["id"] == str(attachment.id) def test_filter(self) -> None: self.login_as(user=self.user) self.create_attachment(type="event.attachment") attachment2 = self.create_attachment(type="event.minidump") with self.feature("organizations:event-attachments"): response = self.client.get(self.path(types=["event.minidump"])) assert response.status_code == 200, response.content assert len(response.data) == 1 assert response.data[0]["id"] == str(attachment2.id) def test_screenshot_across_groups(self) -> None: self.login_as(user=self.user) min_ago = before_now(minutes=1).isoformat() group1_event = self.store_event( data={"fingerprint": ["group1"], "timestamp": min_ago}, project_id=self.project.id ) self.create_attachment(file_name="screenshot.png", event_id=group1_event.event_id) self.create_attachment(file_name="screenshot-1.png", 
event_id=group1_event.event_id) # This will not be included as name doesn't contain 'screenshot' self.create_attachment(file_name="foo.png", event_id=group1_event.event_id) group2_event = self.store_event( data={"fingerprint": ["group2"], "timestamp": min_ago}, project_id=self.project.id ) self.create_attachment(file_name="crash_screenshot.png", event_id=group2_event.event_id) with self.feature("organizations:event-attachments"): response = self.client.get(self.path(screenshot=True)) assert response.status_code == 200, response.content assert len(response.data) == 3 for attachment in response.data: # foo.png will not be included assert attachment["name"] in [ "screenshot.png", "screenshot-1.png", "crash_screenshot.png", ] assert attachment["event_id"] in [group1_event.event_id, group2_event.event_id] def test_without_feature(self) -> None: self.login_as(user=self.user) self.create_attachment() with self.feature({"organizations:event-attachments": False}): response = self.client.get(self.path()) assert response.status_code == 404, response.content def test_event_id_filter(self) -> None: self.login_as(user=self.user) attachment = self.create_attachment() self.create_attachment(event_id="b" * 32) with self.feature("organizations:event-attachments"): response = self.client.get(self.path(event_ids=[attachment.event_id])) assert response.status_code == 200, response.content assert len(response.data) == 1 assert response.data[0]["event_id"] == attachment.event_id def test_multi_event_id_filter(self) -> None: self.login_as(user=self.user) attachment = self.create_attachment() attachment2 = self.create_attachment(event_id="b" * 32) self.create_attachment(event_id="c" * 32) with self.feature("organizations:event-attachments"): response = self.client.get( self.path(event_ids=[attachment.event_id, attachment2.event_id]) ) assert response.status_code == 200, response.content assert len(response.data) == 2 assert response.data[0]["event_id"] == attachment2.event_id assert 
response.data[1]["event_id"] == attachment.event_id def test_date_range_filter(self) -> None: self.login_as(user=self.user) old_attachment = self.create_attachment(event_id="b" * 32) old_attachment.date_added = before_now(days=28).isoformat() old_attachment.save() newer_attachment = self.create_attachment(event_id="c" * 32) with self.feature("organizations:event-attachments"): all_response = self.client.get(f"/api/0/issues/{self.group.id}/attachments/") assert len(all_response.data) == 2 with self.feature("organizations:event-attachments"): range_response = self.client.get( f"/api/0/issues/{self.group.id}/attachments/?statsPeriod=14d" ) assert range_response.status_code == 200, range_response.content assert len(range_response.data) == 1 assert range_response.data[0]["id"] == str(newer_attachment.id) assert range_response.data[0]["event_id"] == newer_attachment.event_id def test_event_environment_filter(self) -> None: self.login_as(user=self.user) data = {} for env in ["production", "development"]: event = self.store_event( data={ "fingerprint": ["same-group"], "timestamp": before_now(days=1).isoformat(), "environment": env, }, project_id=self.project.id, ) attachment = self.create_attachment(event_id=event.event_id, group_id=event.group_id) data[env] = (event, attachment) prod_event, prod_attachment = data["production"] assert prod_event.group is not None with self.feature("organizations:event-attachments"): all_response = self.client.get(f"/api/0/issues/{prod_event.group.id}/attachments/") assert len(all_response.data) == 2 with self.feature("organizations:event-attachments"): prod_response = self.client.get( f"/api/0/issues/{prod_event.group.id}/attachments/?environment=production" ) assert len(prod_response.data) == 1 assert prod_response.data[0]["id"] == str(prod_attachment.id) assert prod_response.data[0]["event_id"] == prod_attachment.event_id def test_event_query_filter(self) -> None: self.login_as(user=self.user) data = {} for org in ["sentry", 
"not-sentry"]: event = self.store_event( data={ "fingerprint": ["same-group"], "timestamp": before_now(days=1).isoformat(), "tags": {"organization": org}, }, project_id=self.project.id, ) attachment = self.create_attachment(event_id=event.event_id, group_id=event.group_id) data[org] = (event, attachment) sentry_event, sentry_attachment = data["sentry"] assert sentry_event.group is not None with self.feature("organizations:event-attachments"): all_response = self.client.get(f"/api/0/issues/{sentry_event.group.id}/attachments/") assert len(all_response.data) == 2 with self.feature("organizations:event-attachments"): prod_response = self.client.get( f"/api/0/issues/{sentry_event.group.id}/attachments/?query=organization:sentry" ) assert len(prod_response.data) == 1 assert prod_response.data[0]["id"] == str(sentry_attachment.id) assert prod_response.data[0]["event_id"] == sentry_attachment.event_id def test_erroneous_event_filter(self) -> None: self.login_as(user=self.user) event = self.store_event( data={ "fingerprint": ["same-group"], "timestamp": before_now(days=1).isoformat(), "tags": {"organization": "sentry"}, "environment": "production", }, project_id=self.project.id, ) self.create_attachment(event_id=event.event_id, group_id=event.group_id) assert event.group is not None with self.feature("organizations:event-attachments"): response = self.client.get( f"/api/0/issues/{event.group.id}/attachments/?query=issue:None" ) assert response.status_code == 400 def test_event_filters_not_matching_should_return_no_attachments(self) -> None: self.login_as(user=self.user) self.create_environment(name="development", project=self.project) event = self.store_event( data={ "fingerprint": ["same-group"], "timestamp": before_now(days=1).isoformat(), "tags": {"organization": "sentry"}, "environment": "production", }, project_id=self.project.id, ) attachment = self.create_attachment(event_id=event.event_id, group_id=event.group_id) assert event.group is not None with 
self.feature("organizations:event-attachments"): response = self.client.get(f"/api/0/issues/{event.group.id}/attachments/") assert len(response.data) == 1 assert response.data[0]["id"] == str(attachment.id) assert response.data[0]["event_id"] == attachment.event_id with self.feature("organizations:event-attachments"): response = self.client.get( f"/api/0/issues/{event.group.id}/attachments/?query=organization:acme" ) assert len(response.data) == 0 with self.feature("organizations:event-attachments"): response = self.client.get( f"/api/0/issues/{event.group.id}/attachments/?environment=development" ) assert len(response.data) == 0
GroupEventAttachmentsTest
python
PrefectHQ__prefect
src/prefect/server/schemas/filters.py
{ "start": 10418, "end": 11332 }
class ____(PrefectOperatorFilterBaseModel): """Filter by `FlowRun.deployment_id`.""" any_: Optional[list[UUID]] = Field( default=None, description="A list of flow run deployment ids to include" ) is_null_: Optional[bool] = Field( default=None, description="If true, only include flow runs without deployment ids", ) def _get_filter_list( self, db: "PrefectDBInterface" ) -> Iterable[sa.ColumnExpressionArgument[bool]]: filters: list[sa.ColumnExpressionArgument[bool]] = [] if self.any_ is not None: filters.append(db.FlowRun.deployment_id.in_(self.any_)) if self.is_null_ is not None: filters.append( db.FlowRun.deployment_id.is_(None) if self.is_null_ else db.FlowRun.deployment_id.is_not(None) ) return filters
FlowRunFilterDeploymentId
python
fastapi__sqlmodel
docs_src/tutorial/insert/tutorial003.py
{ "start": 92, "end": 1018 }
class ____(SQLModel, table=True): id: Optional[int] = Field(default=None, primary_key=True) name: str secret_name: str age: Optional[int] = None sqlite_file_name = "database.db" sqlite_url = f"sqlite:///{sqlite_file_name}" engine = create_engine(sqlite_url, echo=True) def create_db_and_tables(): SQLModel.metadata.create_all(engine) def create_heroes(): # (1)! hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson") # (2)! hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador") hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48) with Session(engine) as session: # (3)! session.add(hero_1) # (4)! session.add(hero_2) session.add(hero_3) session.commit() # (5)! # (6)! def main(): # (7)! create_db_and_tables() # (8)! create_heroes() # (9)! if __name__ == "__main__": # (10)! main() # (11)!
Hero
python
kubernetes-client__python
kubernetes/client/rest.py
{ "start": 663, "end": 1154 }
class ____(io.IOBase): def __init__(self, resp): self.urllib3_response = resp self.status = resp.status self.reason = resp.reason self.data = resp.data def getheaders(self): """Returns a dictionary of the response headers.""" return self.urllib3_response.getheaders() def getheader(self, name, default=None): """Returns a given response header.""" return self.urllib3_response.getheader(name, default)
RESTResponse
python
django__django
tests/auth_tests/urls.py
{ "start": 3189, "end": 8334 }
class ____(EmptyResponseBaseView): pass @login_not_required def public_view(request): return HttpResponse() def protected_view(request): return HttpResponse() @login_required(login_url="/custom_login/", redirect_field_name="step") def protected_view_with_login_required_decorator(request): return HttpResponse() # special urls for auth test cases urlpatterns = auth_urlpatterns + [ path( "logout/custom_query/", views.LogoutView.as_view(redirect_field_name="follow") ), path("logout/next_page/", views.LogoutView.as_view(next_page="/somewhere/")), path( "logout/next_page/named/", views.LogoutView.as_view(next_page="password_reset") ), path( "logout/allowed_hosts/", views.LogoutView.as_view(success_url_allowed_hosts={"otherserver"}), ), path("remote_user/", remote_user_auth_view), path( "password_reset_from_email/", views.PasswordResetView.as_view(from_email="staffmember@example.com"), ), path( "password_reset_extra_email_context/", views.PasswordResetView.as_view( extra_email_context={"greeting": "Hello!", "domain": "custom.example.com"}, ), ), path( "password_reset/custom_redirect/", views.PasswordResetView.as_view(success_url="/custom/"), ), path( "password_reset/custom_redirect/named/", views.PasswordResetView.as_view(success_url=reverse_lazy("password_reset")), ), path( "password_reset/html_email_template/", views.PasswordResetView.as_view( html_email_template_name="registration/html_password_reset_email.html" ), ), path( "reset/custom/<uidb64>/<token>/", views.PasswordResetConfirmView.as_view(success_url="/custom/"), ), path( "reset/custom/named/<uidb64>/<token>/", views.PasswordResetConfirmView.as_view( success_url=reverse_lazy("password_reset") ), ), path( "reset/custom/token/<uidb64>/<token>/", views.PasswordResetConfirmView.as_view(reset_url_token="set-passwordcustom"), ), path( "reset/post_reset_login/<uidb64>/<token>/", views.PasswordResetConfirmView.as_view(post_reset_login=True), ), path( "reset/post_reset_login_custom_backend/<uidb64>/<token>/", 
views.PasswordResetConfirmView.as_view( post_reset_login=True, post_reset_login_backend=( "django.contrib.auth.backends.AllowAllUsersModelBackend" ), ), ), path("reset/missing_parameters/", views.PasswordResetConfirmView.as_view()), path( "password_change/custom/", views.PasswordChangeView.as_view(success_url="/custom/"), ), path( "password_change/custom/named/", views.PasswordChangeView.as_view(success_url=reverse_lazy("password_reset")), ), path("login_required/", login_required(views.PasswordResetView.as_view())), path( "login_required_login_url/", login_required(views.PasswordResetView.as_view(), login_url="/somewhere/"), ), path("auth_processor_no_attr_access/", auth_processor_no_attr_access), path("auth_processor_attr_access/", auth_processor_attr_access), path("auth_processor_user/", auth_processor_user), path("auth_processor_perms/", auth_processor_perms), path("auth_processor_perm_in_perms/", auth_processor_perm_in_perms), path("auth_processor_messages/", auth_processor_messages), path( "custom_request_auth_login/", views.LoginView.as_view(authentication_form=CustomRequestAuthenticationForm), ), re_path("^userpage/(.+)/$", userpage, name="userpage"), path("login/redirect_authenticated_user_default/", views.LoginView.as_view()), path( "login/redirect_authenticated_user/", views.LoginView.as_view(redirect_authenticated_user=True), ), path( "login/allowed_hosts/", views.LoginView.as_view(success_url_allowed_hosts={"otherserver"}), ), path( "login/get_default_redirect_url/", CustomDefaultRedirectURLLoginView.as_view() ), path("login/next_page/", views.LoginView.as_view(next_page="/somewhere/")), path("login/next_page/named/", views.LoginView.as_view(next_page="password_reset")), path("permission_required_redirect/", permission_required_redirect), path("permission_required_exception/", permission_required_exception), path( "login_and_permission_required_exception/", login_and_permission_required_exception, ), path("public_view/", PublicView.as_view()), 
path("public_function_view/", public_view), path("protected_view/", ProtectedView.as_view()), path("protected_function_view/", protected_view), path( "login_required_decorator_view/", protected_view_with_login_required_decorator ), path("login_required_cbv_view/", ProtectedViewWithCustomLoginRequired.as_view()), path("setlang/", set_language, name="set_language"), path("admin/", admin.site.urls), ]
ProtectedViewWithCustomLoginRequired
python
pytorch__pytorch
test/higher_order_ops/test_invoke_subgraph.py
{ "start": 53452, "end": 56379 }
class ____(torch.nn.Module): def forward(self, L_x_: "f32[8, 8]"): l_x_ = L_x_ subgraph_0 = self.subgraph_0 invoke_subgraph = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', l_x_); subgraph_0 = None getitem: "f32[8, 8]" = invoke_subgraph[0]; invoke_subgraph = None subgraph_1 = self.subgraph_0 invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(subgraph_1, 'subgraph_0', l_x_); subgraph_1 = None getitem_1: "f32[8, 8]" = invoke_subgraph_1[0]; invoke_subgraph_1 = None mul: "f32[8, 8]" = getitem * getitem_1; getitem = getitem_1 = None add: "f32[8, 8]" = l_x_ + mul; mul = None add_1: "f32[8, 8]" = add + l_x_; add = l_x_ = None return (add_1,) class subgraph_0(torch.nn.Module): def forward(self, l_x_: "f32[8, 8]"): sin: "f32[8, 8]" = torch.sin(l_x_); l_x_ = None return (sin,) """, ) @requires_cuda_and_triton def test_return_none(self): from torch.nn import functional as F weight = torch.ones( 1000, device="cuda:0", dtype=torch.float32, requires_grad=True ) ones = torch.ones(1000, device="cuda:0", dtype=torch.float32) @nested_compile_region def fn(x, train): return F.dropout(x * weight, 0.33, train) @torch._dynamo.optimize_assert("inductor") def run(x, train=True): return fn(x, train) r1 = run(ones, train=False) r1.sum().backward() weight.grad.clone() def test_return_none_from_fwd(self): @nested_compile_region def gn(x): return x * 2, None, x * 3 def fn(x): ys = gn(x) return ys[0] + ys[2] opt_fn = torch.compile(fn, backend="inductor", fullgraph=True) x = torch.randn(8, 8, requires_grad=True) x_clone = x.detach().clone().requires_grad_(True) ref = fn(x) res = opt_fn(x_clone) ref.sum().backward() res.sum().backward() self.assertEqual(ref, res) self.assertEqual(x.grad, x_clone.grad) backend = AotEagerAndRecordGraphs() opt_fn = torch.compile(fn, backend=backend, fullgraph=True) x = torch.randn(8, 8, requires_grad=True) res = opt_fn(x_clone) res.sum().backward() self.assertEqual(len(backend.graphs), 1) self.assertEqual(len(backend.fw_graphs), 1) 
self.assertEqual(len(backend.bw_graphs), 1) self.count_unique_get_attr_nodes(backend.graphs[0], [], 1) self.count_unique_get_attr_nodes(backend.fw_graphs[0], [], 1) self.count_unique_get_attr_nodes(backend.bw_graphs[0], [], 1) if not TEST_WITH_CROSSREF: self.assertExpectedInline( normalize_gm(backend.graphs[0].print_readable(print_output=False)), """\
GraphModule
python
dagster-io__dagster
python_modules/libraries/dagster-gcp/dagster_gcp/bigquery/types.py
{ "start": 3686, "end": 4291 }
class ____(ConfigScalar): def __init__(self): super().__init__( key=type(self).__name__, given_name=type(self).__name__, scalar_kind=ConfigScalarKind.STRING, ) def post_process(self, value): if not _is_valid_table(value): raise PostProcessingError( 'Tables must be of the form "project.dataset.table" or "dataset.table" ' "with optional date-partition suffix" ) return value # https://github.com/dagster-io/dagster/issues/1971 Table = _Table() Dataset = _Dataset()
_Table
python
joke2k__faker
tests/providers/test_automotive.py
{ "start": 5924, "end": 6087 }
class ____(_SimpleAutomotiveTestMixin): """Test fi_FI automotive provider methods""" license_plate_pattern: Pattern = re.compile(r"[A-Z]{3}-\d{3}")
TestFiFi
python
kamyu104__LeetCode-Solutions
Python/furthest-building-you-can-reach.py
{ "start": 48, "end": 735 }
class ____(object): def furthestBuilding(self, heights, bricks, ladders): """ :type heights: List[int] :type bricks: int :type ladders: int :rtype: int """ min_heap = [] for i in xrange(len(heights)-1): diff = heights[i+1]-heights[i] if diff > 0: heapq.heappush(min_heap, diff) if len(min_heap) <= ladders: # ladders are reserved for largest diffs continue bricks -= heapq.heappop(min_heap) # use bricks if ladders are not enough if bricks < 0: # not enough bricks return i return len(heights)-1
Solution
python
pyca__cryptography
tests/hazmat/primitives/test_pkcs12.py
{ "start": 35963, "end": 43701 }
class ____: def test_certificate_constructor(self, backend): with pytest.raises(TypeError): PKCS12Certificate(None, None) # type:ignore[arg-type] with pytest.raises(TypeError): PKCS12Certificate("hello", None) # type:ignore[arg-type] cert = _load_cert(backend, os.path.join("x509", "cryptography.io.pem")) with pytest.raises(TypeError): PKCS12Certificate(cert, "hello") # type:ignore[arg-type] with pytest.raises(TypeError): PKCS12Certificate(cert, 42) # type:ignore[arg-type] def test_certificate_equality(self, backend): cert2 = _load_cert( backend, os.path.join("x509", "custom", "dsa_selfsigned_ca.pem") ) cert3 = _load_cert(backend, os.path.join("x509", "letsencryptx3.pem")) c2n = PKCS12Certificate(cert2, None) c2a = PKCS12Certificate(cert2, b"a") c2b = PKCS12Certificate(cert2, b"b") c3n = PKCS12Certificate(cert3, None) c3a = PKCS12Certificate(cert3, b"a") assert c2n == c2n assert c2a == c2a assert c2n != c2a assert c2n != c3n assert c2a != c2b assert c2a != c3a assert c2n != "test" # type: ignore[comparison-overlap] def test_certificate_hash(self, backend): cert2 = _load_cert( backend, os.path.join("x509", "custom", "dsa_selfsigned_ca.pem") ) cert3 = _load_cert(backend, os.path.join("x509", "letsencryptx3.pem")) c2n = PKCS12Certificate(cert2, None) c2a = PKCS12Certificate(cert2, b"a") c2b = PKCS12Certificate(cert2, b"b") c3n = PKCS12Certificate(cert3, None) c3a = PKCS12Certificate(cert3, b"a") assert hash(c2n) == hash(c2n) assert hash(c2a) == hash(c2a) assert hash(c2n) != hash(c2a) assert hash(c2n) != hash(c3n) assert hash(c2a) != hash(c2b) assert hash(c2a) != hash(c3a) def test_certificate_repr(self, backend): cert = _load_cert(backend, os.path.join("x509", "cryptography.io.pem")) assert ( repr(PKCS12Certificate(cert, None)) == f"<PKCS12Certificate({cert!r}, friendly_name=None)>" ) assert ( repr(PKCS12Certificate(cert, b"a")) == f"<PKCS12Certificate({cert!r}, friendly_name=b'a')>" ) def test_key_and_certificates_constructor(self, backend): with 
pytest.raises(TypeError): PKCS12KeyAndCertificates( "hello", # type:ignore[arg-type] None, [], ) with pytest.raises(TypeError): PKCS12KeyAndCertificates( None, "hello", # type:ignore[arg-type] [], ) with pytest.raises(TypeError): PKCS12KeyAndCertificates( None, None, ["hello"], # type:ignore[list-item] ) def test_key_and_certificates_equality(self, backend): cert, key = _load_ca(backend) cert2 = _load_cert( backend, os.path.join("x509", "custom", "dsa_selfsigned_ca.pem") ) cert3 = _load_cert(backend, os.path.join("x509", "letsencryptx3.pem")) p12a = PKCS12KeyAndCertificates( key, PKCS12Certificate(cert, None), [PKCS12Certificate(cert2, None), PKCS12Certificate(cert3, None)], ) p12b = PKCS12KeyAndCertificates( key, PKCS12Certificate(cert, b"name"), [PKCS12Certificate(cert2, None), PKCS12Certificate(cert3, None)], ) p12c = PKCS12KeyAndCertificates( key, PKCS12Certificate(cert2, None), [PKCS12Certificate(cert2, None), PKCS12Certificate(cert3, None)], ) p12d = PKCS12KeyAndCertificates( key, PKCS12Certificate(cert, None), [PKCS12Certificate(cert3, None), PKCS12Certificate(cert2, None)], ) p12e = PKCS12KeyAndCertificates( None, PKCS12Certificate(cert, None), [PKCS12Certificate(cert2, None), PKCS12Certificate(cert3, None)], ) p12f = PKCS12KeyAndCertificates( None, PKCS12Certificate(cert2, None), [PKCS12Certificate(cert2, None), PKCS12Certificate(cert3, None)], ) p12g = PKCS12KeyAndCertificates( key, None, [PKCS12Certificate(cert2, None), PKCS12Certificate(cert3, None)], ) p12h = PKCS12KeyAndCertificates(None, None, []) assert p12a == p12a assert p12h == p12h assert p12a != p12b assert p12a != p12c assert p12a != p12d assert p12a != p12e assert p12a != p12g assert p12a != p12h assert p12e != p12f assert p12e != p12g assert p12e != p12h assert p12e != "test" def test_key_and_certificates_hash(self, backend): cert, key = _load_ca(backend) cert2 = _load_cert( backend, os.path.join("x509", "custom", "dsa_selfsigned_ca.pem") ) cert3 = _load_cert(backend, os.path.join("x509", 
"letsencryptx3.pem")) p12a = PKCS12KeyAndCertificates( key, PKCS12Certificate(cert, None), [PKCS12Certificate(cert2, None), PKCS12Certificate(cert3, None)], ) p12b = PKCS12KeyAndCertificates( key, PKCS12Certificate(cert, b"name"), [PKCS12Certificate(cert2, None), PKCS12Certificate(cert3, None)], ) p12c = PKCS12KeyAndCertificates( key, PKCS12Certificate(cert2, None), [PKCS12Certificate(cert2, None), PKCS12Certificate(cert3, None)], ) p12d = PKCS12KeyAndCertificates( key, PKCS12Certificate(cert, None), [PKCS12Certificate(cert3, None), PKCS12Certificate(cert2, None)], ) p12e = PKCS12KeyAndCertificates( None, PKCS12Certificate(cert, None), [PKCS12Certificate(cert2, None), PKCS12Certificate(cert3, None)], ) p12f = PKCS12KeyAndCertificates( None, PKCS12Certificate(cert2, None), [PKCS12Certificate(cert2, None), PKCS12Certificate(cert3, None)], ) p12g = PKCS12KeyAndCertificates( key, None, [PKCS12Certificate(cert2, None), PKCS12Certificate(cert3, None)], ) p12h = PKCS12KeyAndCertificates(None, None, []) assert hash(p12a) == hash(p12a) assert hash(p12h) == hash(p12h) assert hash(p12a) != hash(p12b) assert hash(p12a) != hash(p12c) assert hash(p12a) != hash(p12d) assert hash(p12a) != hash(p12e) assert hash(p12a) != hash(p12g) assert hash(p12a) != hash(p12h) assert hash(p12e) != hash(p12f) assert hash(p12e) != hash(p12g) assert hash(p12e) != hash(p12h) def test_key_and_certificates_repr(self, backend): cert, key = _load_ca(backend) cert2 = _load_cert( backend, os.path.join("x509", "cryptography.io.pem") ) assert repr( PKCS12KeyAndCertificates( key, PKCS12Certificate(cert, None), [PKCS12Certificate(cert2, b"name2")], ) ) == ( f"<PKCS12KeyAndCertificates(key={key}, " f"cert=<PKCS12Certificate({cert}, friendly_name=None)>, " f"additional_certs=[" f"<PKCS12Certificate({cert2}, friendly_name=b'name2')>])>" )
TestPKCS12Objects
python
altair-viz__altair
tests/utils/test_core.py
{ "start": 1281, "end": 1389 }
class ____(FieldChannel, schemapi.SchemaBase): _schema = {json_schema_dict_str} _encoding_name = "y"
Y
python
pandas-dev__pandas
pandas/tests/test_algos.py
{ "start": 55126, "end": 63448 }
class ____: def test_duplicated_with_nas(self): keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object) result = algos.duplicated(keys) expected = np.array([False, False, False, True, False, True]) tm.assert_numpy_array_equal(result, expected) result = algos.duplicated(keys, keep="first") expected = np.array([False, False, False, True, False, True]) tm.assert_numpy_array_equal(result, expected) result = algos.duplicated(keys, keep="last") expected = np.array([True, False, True, False, False, False]) tm.assert_numpy_array_equal(result, expected) result = algos.duplicated(keys, keep=False) expected = np.array([True, False, True, True, False, True]) tm.assert_numpy_array_equal(result, expected) keys = np.empty(8, dtype=object) for i, t in enumerate( zip([0, 0, np.nan, np.nan] * 2, [0, np.nan, 0, np.nan] * 2) ): keys[i] = t result = algos.duplicated(keys) falses = [False] * 4 trues = [True] * 4 expected = np.array(falses + trues) tm.assert_numpy_array_equal(result, expected) result = algos.duplicated(keys, keep="last") expected = np.array(trues + falses) tm.assert_numpy_array_equal(result, expected) result = algos.duplicated(keys, keep=False) expected = np.array(trues + trues) tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize( "case", [ np.array([1, 2, 1, 5, 3, 2, 4, 1, 5, 6]), np.array([1.1, 2.2, 1.1, np.nan, 3.3, 2.2, 4.4, 1.1, np.nan, 6.6]), np.array( [ 1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j, 2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j, ] ), np.array(["a", "b", "a", "e", "c", "b", "d", "a", "e", "f"], dtype=object), np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7], dtype=np.uint64), ], ) def test_numeric_object_likes(self, case): exp_first = np.array( [False, False, True, False, False, True, False, True, True, False] ) exp_last = np.array( [True, True, True, True, False, False, False, False, False, False] ) exp_false = exp_first | exp_last res_first = algos.duplicated(case, keep="first") tm.assert_numpy_array_equal(res_first, exp_first) 
res_last = algos.duplicated(case, keep="last") tm.assert_numpy_array_equal(res_last, exp_last) res_false = algos.duplicated(case, keep=False) tm.assert_numpy_array_equal(res_false, exp_false) # index for idx in [Index(case), Index(case, dtype="category")]: res_first = idx.duplicated(keep="first") tm.assert_numpy_array_equal(res_first, exp_first) res_last = idx.duplicated(keep="last") tm.assert_numpy_array_equal(res_last, exp_last) res_false = idx.duplicated(keep=False) tm.assert_numpy_array_equal(res_false, exp_false) # series for s in [Series(case), Series(case, dtype="category")]: res_first = s.duplicated(keep="first") tm.assert_series_equal(res_first, Series(exp_first)) res_last = s.duplicated(keep="last") tm.assert_series_equal(res_last, Series(exp_last)) res_false = s.duplicated(keep=False) tm.assert_series_equal(res_false, Series(exp_false)) def test_datetime_likes(self): dt = [ "2011-01-01", "2011-01-02", "2011-01-01", "NaT", "2011-01-03", "2011-01-02", "2011-01-04", "2011-01-01", "NaT", "2011-01-06", ] td = [ "1 days", "2 days", "1 days", "NaT", "3 days", "2 days", "4 days", "1 days", "NaT", "6 days", ] cases = [ np.array([Timestamp(d) for d in dt]), np.array([Timestamp(d, tz="US/Eastern") for d in dt]), np.array([Period(d, freq="D") for d in dt]), np.array([np.datetime64(d) for d in dt]), np.array([Timedelta(d) for d in td]), ] exp_first = np.array( [False, False, True, False, False, True, False, True, True, False] ) exp_last = np.array( [True, True, True, True, False, False, False, False, False, False] ) exp_false = exp_first | exp_last for case in cases: res_first = algos.duplicated(case, keep="first") tm.assert_numpy_array_equal(res_first, exp_first) res_last = algos.duplicated(case, keep="last") tm.assert_numpy_array_equal(res_last, exp_last) res_false = algos.duplicated(case, keep=False) tm.assert_numpy_array_equal(res_false, exp_false) # index for idx in [ Index(case), Index(case, dtype="category"), Index(case, dtype=object), ]: res_first = 
idx.duplicated(keep="first") tm.assert_numpy_array_equal(res_first, exp_first) res_last = idx.duplicated(keep="last") tm.assert_numpy_array_equal(res_last, exp_last) res_false = idx.duplicated(keep=False) tm.assert_numpy_array_equal(res_false, exp_false) # series for s in [ Series(case), Series(case, dtype="category"), Series(case, dtype=object), ]: res_first = s.duplicated(keep="first") tm.assert_series_equal(res_first, Series(exp_first)) res_last = s.duplicated(keep="last") tm.assert_series_equal(res_last, Series(exp_last)) res_false = s.duplicated(keep=False) tm.assert_series_equal(res_false, Series(exp_false)) @pytest.mark.parametrize("case", [Index([1, 2, 3]), pd.RangeIndex(0, 3)]) def test_unique_index(self, case): assert case.is_unique is True tm.assert_numpy_array_equal(case.duplicated(), np.array([False, False, False])) @pytest.mark.parametrize( "arr, uniques", [ ( [(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)], [(0, 0), (0, 1), (1, 0), (1, 1)], ), ( [("b", "c"), ("a", "b"), ("a", "b"), ("b", "c")], [("b", "c"), ("a", "b")], ), ([("a", 1), ("b", 2), ("a", 3), ("a", 1)], [("a", 1), ("b", 2), ("a", 3)]), ], ) def test_unique_tuples(self, arr, uniques): # https://github.com/pandas-dev/pandas/issues/16519 expected = np.empty(len(uniques), dtype=object) expected[:] = uniques msg = ( r"unique requires a Series, Index, ExtensionArray, np.ndarray " r"or NumpyExtensionArray got list" ) with pytest.raises(TypeError, match=msg): # GH#52986 pd.unique(arr) res = pd.unique(com.asarray_tuplesafe(arr, dtype=object)) tm.assert_numpy_array_equal(res, expected) @pytest.mark.parametrize( "array,expected", [ ( [1 + 1j, 0, 1, 1j, 1 + 2j, 1 + 2j], np.array([(1 + 1j), 0j, (1 + 0j), 1j, (1 + 2j)], dtype=complex), ) ], ) def test_unique_complex_numbers(self, array, expected): # GH 17927 msg = ( r"unique requires a Series, Index, ExtensionArray, np.ndarray " r"or NumpyExtensionArray got list" ) with pytest.raises(TypeError, match=msg): # GH#52986 pd.unique(array) 
res = pd.unique(np.array(array)) tm.assert_numpy_array_equal(res, expected)
TestDuplicated
python
pypa__pip
src/pip/_vendor/rich/pager.py
{ "start": 291, "end": 827 }
class ____(Pager): """Uses the pager installed on the system.""" def _pager(self, content: str) -> Any: #  pragma: no cover return __import__("pydoc").pager(content) def show(self, content: str) -> None: """Use the same pager used by pydoc.""" self._pager(content) if __name__ == "__main__": # pragma: no cover from .__main__ import make_test_card from .console import Console console = Console() with console.pager(styles=True): console.print(make_test_card())
SystemPager
python
streamlit__streamlit
lib/streamlit/runtime/state/session_state.py
{ "start": 11954, "end": 37317 }
class ____: """SessionState allows users to store values that persist between app reruns. Example ------- >>> if "num_script_runs" not in st.session_state: ... st.session_state.num_script_runs = 0 >>> st.session_state.num_script_runs += 1 >>> st.write(st.session_state.num_script_runs) # writes 1 The next time your script runs, the value of st.session_state.num_script_runs will be preserved. >>> st.session_state.num_script_runs += 1 >>> st.write(st.session_state.num_script_runs) # writes 2 """ # All the values from previous script runs, squished together to save memory _old_state: dict[str, Any] = field(default_factory=dict) # Values set in session state during the current script run, possibly for # setting a widget's value. Keyed by a user provided string. _new_session_state: dict[str, Any] = field(default_factory=dict) # Widget values from the frontend, usually one changing prompted the script rerun _new_widget_state: WStates = field(default_factory=WStates) # Keys used for widgets will be eagerly converted to the matching element id _key_id_mapper: KeyIdMapper = field(default_factory=KeyIdMapper) # query params are stored in session state because query params will be tied with # widget state at one point. query_params: QueryParams = field(default_factory=QueryParams) def __repr__(self) -> str: return util.repr_(self) # is it possible for a value to get through this without being deserialized? def _compact_state(self) -> None: """Copy all current session_state and widget_state values into our _old_state dict, and then clear our current session_state and widget_state. 
""" for key_or_wid in self: try: self._old_state[key_or_wid] = self[key_or_wid] except KeyError: # noqa: PERF203 # handle key errors from widget state not having metadata gracefully # https://github.com/streamlit/streamlit/issues/7206 pass self._new_session_state.clear() self._new_widget_state.clear() def clear(self) -> None: """Reset self completely, clearing all current and old values.""" self._old_state.clear() self._new_session_state.clear() self._new_widget_state.clear() self._key_id_mapper.clear() @property def filtered_state(self) -> dict[str, Any]: """The combined session and widget state, excluding keyless widgets and internal widgets.""" wid_key_map = self._key_id_mapper.id_key_mapping state: dict[str, Any] = {} # We can't write `for k, v in self.items()` here because doing so will # run into a `KeyError` if widget metadata has been cleared (which # happens when the streamlit server restarted or the cache was cleared), # then we receive a widget's state from a browser. for k in self._keys(): if not is_element_id(k) and not _is_internal_key(k): state[k] = self[k] elif is_keyed_element_id(k) and not _is_internal_key(k): try: key = wid_key_map[k] # Value returned by __getitem__ is already presented. state[key] = self[k] except KeyError: # Widget id no longer maps to a key, it is a not yet # cleared value in old state for a reset widget pass return state def _keys(self) -> set[str]: """All keys active in Session State, with widget keys converted to widget ids when one is known. (This includes autogenerated keys for widgets that don't have user_keys defined, and which aren't exposed to user code). 
""" old_keys = {self._get_widget_id(k) for k in self._old_state} new_widget_keys = set(self._new_widget_state.keys()) new_session_state_keys = { self._get_widget_id(k) for k in self._new_session_state } return old_keys | new_widget_keys | new_session_state_keys def is_new_state_value(self, user_key: str) -> bool: """True if a value with the given key is in the current session state.""" return user_key in self._new_session_state def reset_state_value(self, user_key: str, value: Any | None) -> None: """Reset a new session state value to a given value without triggering the "state value cannot be modified" error. """ self._new_session_state[user_key] = value def __iter__(self) -> Iterator[Any]: """Return an iterator over the keys of the SessionState. This is a shortcut for `iter(self.keys())`. """ return iter(self._keys()) def __len__(self) -> int: """Return the number of items in SessionState.""" return len(self._keys()) def __getitem__(self, key: str) -> Any: wid_key_map = self._key_id_mapper.id_key_mapping widget_id = self._get_widget_id(key) if widget_id in wid_key_map and widget_id == key: # the "key" is a raw widget id, so get its associated user key for lookup key = wid_key_map[widget_id] try: base_value = self._getitem(widget_id, key) return ( apply_presenter(self, widget_id, base_value) if widget_id is not None else base_value ) except KeyError: raise KeyError(_missing_key_error_message(key)) def _getitem(self, widget_id: str | None, user_key: str | None) -> Any: """Get the value of an entry in Session State, using either the user-provided key or a widget id as appropriate for the internal dict being accessed. At least one of the arguments must have a value. """ if user_key is None and widget_id is None: raise ValueError( "user_key and widget_id cannot both be None. This should never happen." 
) if user_key is not None: try: return self._new_session_state[user_key] except KeyError: pass if widget_id is not None: try: return self._new_widget_state[widget_id] except KeyError: pass # Typically, there won't be both a widget id and an associated state key in # old state at the same time, so the order we check is arbitrary. # The exception is if session state is set and then a later run has # a widget created, so the widget id entry should be newer. # The opposite case shouldn't happen, because setting the value of a widget # through session state will result in the next widget state reflecting that # value. if widget_id is not None: try: return self._old_state[widget_id] except KeyError: pass if user_key is not None: try: return self._old_state[user_key] except KeyError: pass # We'll never get here raise KeyError def __setitem__(self, user_key: str, value: Any) -> None: """Set the value of the session_state entry with the given user_key. If the key corresponds to a widget or form that's been instantiated during the current script run, raise a StreamlitAPIException instead. """ ctx = get_script_run_ctx() if ctx is not None: widget_id = self._key_id_mapper.get_id_from_key(user_key, None) widget_ids = ctx.widget_ids_this_run form_ids = ctx.form_ids_this_run if widget_id in widget_ids or user_key in form_ids: raise StreamlitAPIException( f"`st.session_state.{user_key}` cannot be modified after the widget" f" with key `{user_key}` is instantiated." 
) self._new_session_state[user_key] = value def __delitem__(self, key: str) -> None: widget_id = self._get_widget_id(key) if not (key in self or widget_id in self): raise KeyError(_missing_key_error_message(key)) if key in self._new_session_state: del self._new_session_state[key] if key in self._old_state: del self._old_state[key] if key in self._key_id_mapper: self._key_id_mapper.delete(key) if widget_id in self._new_widget_state: del self._new_widget_state[widget_id] if widget_id in self._old_state: del self._old_state[widget_id] def set_widgets_from_proto(self, widget_states: WidgetStatesProto) -> None: """Set the value of all widgets represented in the given WidgetStatesProto.""" for state in widget_states.widgets: self._new_widget_state.set_widget_from_proto(state) def on_script_will_rerun(self, latest_widget_states: WidgetStatesProto) -> None: """Called by ScriptRunner before its script re-runs. Update widget data and call callbacks on widgets whose value changed between the previous and current script runs. """ # Clear any triggers that weren't reset because the script was disconnected self._reset_triggers() self._compact_state() self.set_widgets_from_proto(latest_widget_states) self._call_callbacks() def _call_callbacks(self) -> None: """Call callbacks for widgets whose value changed or whose trigger fired.""" from streamlit.runtime.scriptrunner import RerunException # Path 1: single callback. changed_widget_ids_for_single_callback = [ wid for wid in self._new_widget_state if self._widget_changed(wid) and (metadata := self._new_widget_state.widget_metadata.get(wid)) is not None and metadata.callback is not None ] for wid in changed_widget_ids_for_single_callback: try: self._new_widget_state.call_callback(wid) except RerunException: # noqa: PERF203 get_dg_singleton_instance().main_dg.warning( "Calling st.rerun() within a callback is a no-op." ) # Path 2: multiple callbacks. 
widget_ids_to_process = list(self._new_widget_state.states.keys()) for wid in widget_ids_to_process: metadata = self._new_widget_state.widget_metadata.get(wid) if not metadata or metadata.callbacks is None: continue args = metadata.callback_args or () kwargs = metadata.callback_kwargs or {} # 1) Trigger dispatch: bool + JSON trigger aggregator self._dispatch_trigger_callbacks(wid, metadata, args, kwargs) # 2) JSON value change dispatch if metadata.value_type == "json_value": self._dispatch_json_change_callbacks(wid, metadata, args, kwargs) def _execute_widget_callback( self, callback_fn: WidgetCallback, cb_metadata: WidgetMetadata[Any], cb_args: WidgetArgs, cb_kwargs: dict[str, Any], ) -> None: """Execute a widget callback with fragment-aware context. If the widget belongs to a fragment, temporarily marks the current script context as being inside a fragment callback to adapt rerun semantics. Attempts to call ``st.rerun()`` inside a widget callback are converted to a user-visible warning and treated as a no-op. Parameters ---------- callback_fn : WidgetCallback The user-provided callback to execute. cb_metadata : WidgetMetadata[Any] Metadata of the widget associated with the callback. cb_args : WidgetArgs Positional arguments passed to the callback. cb_kwargs : dict[str, Any] Keyword arguments passed to the callback. """ from streamlit.runtime.scriptrunner import RerunException ctx = get_script_run_ctx() if ctx and cb_metadata.fragment_id is not None: ctx.in_fragment_callback = True try: callback_fn(*cb_args, **cb_kwargs) except RerunException: get_dg_singleton_instance().main_dg.warning( "Calling st.rerun() within a callback is a no-op." ) finally: ctx.in_fragment_callback = False else: try: callback_fn(*cb_args, **cb_kwargs) except RerunException: get_dg_singleton_instance().main_dg.warning( "Calling st.rerun() within a callback is a no-op." 
) def _dispatch_trigger_callbacks( self, wid: str, metadata: WidgetMetadata[Any], args: WidgetArgs, kwargs: dict[str, Any], ) -> None: """Dispatch trigger-style callbacks for a widget. Handles the JSON trigger aggregator. The JSON payload may be a single event dict or a list of event dicts; each event must contain an ``"event"`` field that maps to the corresponding callback name in ``metadata.callbacks``. Examples -------- A component with a "submit" callback: >>> metadata.callbacks = {"submit": on_submit} The frontend can send a single event payload: >>> {"event": "submit", "value": "payload"} Or a list of event payloads to be processed in order: >>> [{"event": "edit", ...}, {"event": "submit", ...}] Parameters ---------- wid : str The widget ID. metadata : WidgetMetadata[Any] Metadata for the widget, including registered callbacks. args : WidgetArgs Positional arguments forwarded to the callback. kwargs : dict[str, Any] Keyword arguments forwarded to the callback. """ widget_proto_state = self._new_widget_state.get_serialized(wid) if not widget_proto_state: return # JSON trigger aggregator: value is deserialized by metadata.deserializer if widget_proto_state.json_trigger_value: try: deserialized = self._new_widget_state[wid] except KeyError: deserialized = None payloads: list[object] if isinstance(deserialized, list): payloads = deserialized else: payloads = [deserialized] for payload in payloads: if isinstance(payload, dict): event_name = cast("Mapping[str, object]", payload).get("event") if isinstance(event_name, str) and metadata.callbacks: cb = metadata.callbacks.get(event_name) if cb is not None: self._execute_widget_callback(cb, metadata, args, kwargs) def _dispatch_json_change_callbacks( self, wid: str, metadata: WidgetMetadata[Any], args: WidgetArgs, kwargs: dict[str, Any], ) -> None: """Dispatch change callbacks for JSON-valued widgets. 
Computes a shallow diff between the new and old JSON maps and invokes callbacks for keys that changed or were added/removed. Parameters ---------- wid : str The widget ID. metadata : WidgetMetadata[Any] Metadata for the widget, including registered callbacks. args : WidgetArgs Positional arguments forwarded to the callback. kwargs : dict[str, Any] Keyword arguments forwarded to the callback. """ if not metadata.callbacks: return try: new_val = self._new_widget_state.get(wid) except KeyError: new_val = None old_val = self._old_state.get(wid) def unwrap(obj: object) -> dict[str, object]: if not isinstance(obj, dict): return {} obj = cast("dict[str, Any]", obj) if set(obj.keys()) == {"value"}: value = obj.get("value") if isinstance(value, dict): return dict(value) # shallow copy return dict(obj) new_map = unwrap(new_val) old_map = unwrap(old_val) if new_map or old_map: all_keys = new_map.keys() | old_map.keys() changed_keys = {k for k in all_keys if old_map.get(k) != new_map.get(k)} for key in changed_keys: cb = metadata.callbacks.get(key) if cb is not None: self._execute_widget_callback(cb, metadata, args, kwargs) def _widget_changed(self, widget_id: str) -> bool: """True if the given widget's value changed between the previous script run and the current script run. """ new_value = self._new_widget_state.get(widget_id) old_value = self._old_state.get(widget_id) changed: bool = new_value != old_value return changed def on_script_finished(self, widget_ids_this_run: set[str]) -> None: """Called by ScriptRunner after its script finishes running. Updates widgets to prepare for the next script run. Parameters ---------- widget_ids_this_run: set[str] The IDs of the widgets that were accessed during the script run. Any widget state whose ID does *not* appear in this set is considered "stale" and will be removed. 
""" self._reset_triggers() self._remove_stale_widgets(widget_ids_this_run) def _reset_triggers(self) -> None: """Set all trigger values in our state dictionary to False.""" for state_id in self._new_widget_state: metadata = self._new_widget_state.widget_metadata.get(state_id) if metadata is not None: if metadata.value_type == "trigger_value": self._new_widget_state[state_id] = Value(False) elif metadata.value_type in { "string_trigger_value", "chat_input_value", "json_trigger_value", }: self._new_widget_state[state_id] = Value(None) for state_id in self._old_state: metadata = self._new_widget_state.widget_metadata.get(state_id) if metadata is not None: if metadata.value_type == "trigger_value": self._old_state[state_id] = False elif metadata.value_type in { "string_trigger_value", "chat_input_value", "json_trigger_value", }: self._old_state[state_id] = None def _remove_stale_widgets(self, active_widget_ids: set[str]) -> None: """Remove widget state for widgets whose ids aren't in `active_widget_ids`.""" ctx = get_script_run_ctx() if ctx is None: return self._new_widget_state.remove_stale_widgets( active_widget_ids, ctx.fragment_ids_this_run, ) # Remove entries from _old_state corresponding to # widgets not in widget_ids. 
self._old_state = { k: v for k, v in self._old_state.items() if ( not is_element_id(k) or not _is_stale_widget( self._new_widget_state.widget_metadata.get(k), active_widget_ids, ctx.fragment_ids_this_run, ) ) } def _get_widget_metadata(self, widget_id: str) -> WidgetMetadata[Any] | None: """Return the metadata for a widget id from the current widget state.""" return self._new_widget_state.widget_metadata.get(widget_id) def _set_widget_metadata(self, widget_metadata: WidgetMetadata[Any]) -> None: """Set a widget's metadata.""" widget_id = widget_metadata.id self._new_widget_state.widget_metadata[widget_id] = widget_metadata def get_widget_states(self) -> list[WidgetStateProto]: """Return a list of serialized widget values for each widget with a value.""" return self._new_widget_state.as_widget_states() def _get_widget_id(self, k: str) -> str: """Turns a value that might be a widget id or a user provided key into an appropriate widget id. """ # It's guaranteed that the key is a string since the default is string, # so we can cast it to str here: return cast("str", self._key_id_mapper.get_id_from_key(k, k)) def _set_key_widget_mapping(self, widget_id: str, user_key: str) -> None: self._key_id_mapper[user_key] = widget_id def register_widget( self, metadata: WidgetMetadata[T], user_key: str | None ) -> RegisterWidgetResult[T]: """Register a widget with the SessionState. Returns ------- RegisterWidgetResult[T] Contains the widget's current value, and a bool that will be True if the frontend needs to be updated with the current value. """ widget_id = metadata.id self._set_widget_metadata(metadata) if user_key is not None: # If the widget has a user_key, update its user_key:widget_id mapping self._set_key_widget_mapping(widget_id, user_key) if widget_id not in self and (user_key is None or user_key not in self): # This is the first time the widget is registered, so we save its # value in widget state. 
deserializer = metadata.deserializer initial_widget_value = deepcopy(deserializer(None)) self._new_widget_state.set_from_value(widget_id, initial_widget_value) # Get the current value of the widget for use as its return value. # We return a copy, so that reference types can't be accidentally # mutated by user code. widget_value = cast("T", self[widget_id]) widget_value = deepcopy(widget_value) # widget_value_changed indicates to the caller that the widget's # current value is different from what is in the frontend. widget_value_changed = user_key is not None and self.is_new_state_value( user_key ) return RegisterWidgetResult(widget_value, widget_value_changed) def __contains__(self, key: str) -> bool: try: self[key] except KeyError: return False else: return True def get_stats(self) -> list[CacheStat]: # Lazy-load vendored package to prevent import of numpy from streamlit.vendor.pympler.asizeof import asizeof stat = CacheStat("st_session_state", "", asizeof(self)) return [stat] def _check_serializable(self) -> None: """Verify that everything added to session state can be serialized. We use pickleability as the metric for serializability, and test for pickleability by just trying it. """ for k in self: try: pickle.dumps(self[k]) except Exception as e: # noqa: PERF203 err_msg = ( f"Cannot serialize the value (of type `{type(self[k])}`) of '{k}' in " "st.session_state. Streamlit has been configured to use " "[pickle](https://docs.python.org/3/library/pickle.html) to " "serialize session_state values. Please convert the value to a " "pickle-serializable type. To learn more about this behavior, " "see [our docs](https://docs.streamlit.io/knowledge-base/using-streamlit/serializable-session-state)." ) raise UnserializableSessionStateError(err_msg) from e def maybe_check_serializable(self) -> None: """Verify that session state can be serialized, if the relevant config option is set. See `_check_serializable` for details. 
""" if config.get_option("runner.enforceSerializableSessionState"): self._check_serializable() def _is_internal_key(key: str) -> bool: return key.startswith(STREAMLIT_INTERNAL_KEY_PREFIX) def _is_stale_widget( metadata: WidgetMetadata[Any] | None, active_widget_ids: set[str], fragment_ids_this_run: list[str] | None, ) -> bool: if not metadata: return True # If we're running 1 or more fragments, but this widget is unrelated to any of the # fragments that we're running, then it should not be marked as stale as its value # may still be needed for a future fragment run or full script run. return not ( metadata.id in active_widget_ids or (fragment_ids_this_run and metadata.fragment_id not in fragment_ids_this_run) ) @dataclass
SessionState
python
pytorch__pytorch
torch/_inductor/config.py
{ "start": 66124, "end": 74083 }
class ____:
    """
    Settings for Ahead-Of-Time Inductor Compilation.

    All attributes are module-level config knobs; most can be overridden via the
    AOT_INDUCTOR_* / AOTINDUCTOR_* environment variables read below at import time.
    """

    # AOTInductor output path
    # If an absolute path is specified, the generated lib files will be stored under the directory;
    # If a relative path is specified, it will be used as a subdirectory under the default caching path;
    # If not specified, a temp directory will be created under the default caching path.
    # If the specified path contains something like "model.so", the sub-string will be used
    # to name the generated library.
    output_path = ""

    # Enable extra debug output during AOTInductor compilation.
    debug_compile = os.environ.get("AOT_INDUCTOR_DEBUG_COMPILE", "0") == "1"

    # Emit debug symbols in the compiled artifact.
    debug_symbols = os.environ.get("AOT_INDUCTOR_DEBUG_SYMBOLS", "0") == "1"

    # Annotate generated main wrapper function, i.e. AOTInductorModel::run_impl,
    # to use which cpp compiler optimization level, default to O1
    compile_wrapper_opt_level = os.environ.get(
        "AOT_INDUCTOR_COMPILE_WRAPPER_OPT_LEVEL", "O1"
    )

    # option for debug printing/saving for intermediate tensor values for aot inductor
    # 0: disable debug dumping
    # 1: enable saving intermediate tensor values
    # 2: enable printing intermediate tensor values
    # 3: enable printing kernel names only (useful for pinpointing troublesome kernels)
    debug_intermediate_value_printer: Literal["0", "1", "2", "3"] = os.environ.get(
        "AOT_INDUCTOR_DEBUG_INTERMEDIATE_VALUE_PRINTER", "0"
    )  # type: ignore[assignment]

    # filtered nodes to be printed for debug values. Specify this option when debug_intermediate_value_printer is set to 2
    filtered_kernel_names = os.environ.get(
        "AOT_INDUCTOR_FILTERED_KERNELS_TO_PRINT", None
    )

    # Serialized tree spec for flattening inputs
    # TODO: Move this into metadata
    serialized_in_spec = ""

    # Serialized tree spec for flattening outputs
    # TODO: Move this into metadata
    serialized_out_spec = ""

    # flag to decide whether to create a submodule for constant graph.
    use_runtime_constant_folding: bool = False

    # flag to force weight to be appended to the shared library and mapped by the runtime
    # rather than embedded into the data section. Needed to support 1B+ parameter models
    force_mmap_weights: bool = False

    # Default value of use_consts_asm_build is True, it will build by assembly language.
    # When the value is False, it will build by c++ language.
    use_consts_asm_build = True

    # Whether to produce a packaged artifact.
    package: bool = False
    package_cpp_only: Optional[bool] = None

    # If package_cpp_only is True, whether cpp files will be compiled to a
    # dynamically linked library or static linked library
    dynamic_linkage: bool = True

    # Dictionary of metadata users might want to save to pass to the runtime.
    # TODO: Move this somewhere else, since it's no longer really a config
    metadata: dict[str, str] = {}

    # fbcode only. Whether to raise error if C++ codegen is too big to optimize
    raise_error_on_ignored_optimization: bool = (
        os.environ.get("AOTINDUCTOR_RAISE_ERROR_ON_IGNORED_OPTIMIZATION", "1") == "1"
    )

    # dump an aoti minifier if program errors
    dump_aoti_minifier: bool = os.environ.get("DUMP_AOTI_MINIFIER", "0") == "1"

    # Compiler compilation debug info
    # 1: Dumps the original graph out to repro.py if compilation fails
    # 2: Dumps a minifier_launcher.py if aoti fails.
    # 3: Always dumps a minifier_launcher.py. Good for segfaults.
    # 4: Dumps a minifier_launcher.py if the accuracy fails.
    repro_level: int = int(os.environ.get("AOTINDUCTOR_REPRO_LEVEL", 2))

    # Dictionary of presets that can be passed in
    presets: dict[str, Any] = {}

    # Kill switch for allowing temporary tensors to be allocated as stack arrays. Tests
    # should be run with this flag both on and off to make sure we have coverage.
    allow_stack_allocation: bool = False

    # Enables an alternate DSO interface (the "minimal ArrayRef interface") intended
    # to maximize performance for use cases that it can accommodate at the expense of
    # generality. In brief:
    # - inputs and outputs are ArrayRefTensor<T> (note that strides are required, but the
    #   tensor must be contiguous)
    # - constant handling is unchanged because it is not a per-inference-iteration bottleneck
    #
    # When the DSO is generated in this mode, the usual interface will also be supported,
    # but performance for that interface may be degraded.
    use_minimal_arrayref_interface: bool = False

    # Set to True if we want to use Pytorch's CUDACachingAllocator for weight management
    weight_use_caching_allocator: bool = (
        os.environ.get("AOT_INDUCTOR_WEIGHT_USE_CACHING_ALLOCATOR", "0") == "1"
    )

    # Experimental. Flag to control whether to include weight in .so
    # Not supported for cross_target_platform="windows".
    package_constants_in_so: bool = True

    # Experimental. Flag to control whether to package weight separately on disk and which
    # format to package it in.
    # Options:
    # None:
    #   Do not package weight separately on disk.
    # "pickle_weights":
    #   Each weight is pickled and stored separately in data/weights. We also store the
    #   FQN names of each weight in a weights_config.json in each model's data/aot_inductor/model folder.
    #   Can only be load back from python using torch._inductor.aoti_load_package API now.
    # "binary_blob":
    #   Stores all weights in a single binary blob in data/aot_inductor/model folder for each model.
    # This option and config.aot_inductor.force_mmap_weights cannot both be True
    package_constants_on_disk_format: Optional[str] = None

    # Experimental. Controls automatic precompiling of common AOTI include files.
    precompile_headers: bool = not is_fbcode()

    # Embed generated kernel binary files into model.so
    embed_kernel_binary: Optional[bool] = None

    # Generate kernel files that support multiple archs
    # For CUDA, this means generating fatbin files for kernels, and the fatbin files
    # contains PTX and SASS for the current architecture.
    emit_multi_arch_kernel: Optional[bool] = None

    # If not None, the generated files with use this name in file stem.
    # If None, we will use a hash to name files.
    #
    # If package_cpp_only, this name is also used for the target name in CMakelists.txt
    # The default target name is "aoti_model"
    #
    # If compile_standalone, the aoti model class name is f"AOTInductorModel{name}"
    #
    # This name can only contain letters, numbers, and underscores.
    model_name_for_generated_files: Optional[str] = None

    # Custom ops that have implemented C shim wrappers, defined as an op to C shim declaration dict
    custom_ops_to_c_shims: dict[torch._ops.OpOverload, list[str]] = {}

    # custom op libs that have implemented C shim wrappers
    custom_op_libs: Optional[list[str]] = None

    # Whether to enable link-time-optimization
    enable_lto = os.environ.get("AOT_INDUCTOR_ENABLE_LTO", "0") == "1"

    # Whether the compiled .so should link to libtorch
    link_libtorch: bool = True

    # Currently the only valid option is "windows".
    # We'll use x86_64-w64-mingw32-gcc to cross-compile a .dll file
    # If using cuda, you also need to set WINDOWS_CUDA_HOME env var
    # to point to windows CUDA toolkit.
    # Example: WINDOWS_CUDA_HOME=cuda-windows-base/cuda_cudart/cudart/
    # The path should contain lib cuda and lib cudart
    cross_target_platform: Optional[str] = None

    # If link_libtorch is False and cross_target_platform is windows,
    # a library needs to be provided to provide the shim implementations.
    aoti_shim_library: Optional[str | list[str]] = None
    aoti_shim_library_path: Optional[str] = None


# a convenient class that automatically sets a group of the configs in aot_inductor
# it should only control the flags in aot_inductor.
# it should not do anything else.
aot_inductor
python
ray-project__ray
rllib/policy/tests/test_policy_checkpoint_restore.py
{ "start": 1200, "end": 5008 }
class ____(unittest.TestCase):
    """Integration tests for restoring RLlib policies from checkpoints.

    Covers: double restore from the same checkpoint, adding a restored policy to
    a fresh Algorithm, and round-tripping a deeply nested observation space.
    """

    @classmethod
    def setUpClass(cls) -> None:
        # Shared ray cluster for all tests in this class.
        ray.init()

    @classmethod
    def tearDownClass(cls) -> None:
        ray.shutdown()

    def test_policy_from_checkpoint_twice_torch(self):
        # Delegates to a module-level helper (defined elsewhere in this file).
        return _do_checkpoint_twice_test("torch")

    def test_add_policy_connector_enabled(self):
        # Train one APPO iteration, checkpoint, reload the default policy from
        # disk, then add it (twice, via both APIs) to a brand-new Algorithm.
        with tempfile.TemporaryDirectory() as tmpdir:
            config = (
                APPOConfig()
                .api_stack(
                    enable_env_runner_and_connector_v2=False,
                    enable_rl_module_and_learner=False,
                )
                .environment("CartPole-v1")
            )
            algo = config.build()
            algo.train()
            result = algo.save(checkpoint_dir=tmpdir)

            path_to_checkpoint = os.path.join(
                result.checkpoint.path, "policies", "default_policy"
            )

            policy = Policy.from_checkpoint(path_to_checkpoint)
            self.assertIsNotNone(policy)

            # Add this policy to an Algorithm.
            algo = (
                APPOConfig()
                .api_stack(
                    enable_env_runner_and_connector_v2=False,
                    enable_rl_module_and_learner=False,
                )
                .framework(framework="torch")
                .environment("CartPole-v0")
            ).build()

            # Add the entire policy.
            self.assertIsNotNone(algo.add_policy("test_policy", policy=policy))

            # Add the same policy, but using individual parameter API.
            self.assertIsNotNone(
                algo.add_policy(
                    "test_policy_2",
                    policy_cls=type(policy),
                    observation_space=policy.observation_space,
                    action_space=policy.action_space,
                    config=policy.config,
                    policy_state=policy.get_state(),
                )
            )

    def test_restore_checkpoint_with_nested_obs_space(self):
        from ray.rllib.algorithms.ppo.ppo import PPOConfig

        obs_space = gym.spaces.Box(low=0, high=1, shape=(4,))

        # create 10 levels of nested observation space
        space = obs_space
        for i in range(10):
            space.original_space = gym.spaces.Discrete(2)
            space = space.original_space

        policy = (
            PPOConfig()
            .api_stack(
                enable_env_runner_and_connector_v2=False,
                enable_rl_module_and_learner=False,
            )
            .environment(
                observation_space=obs_space, action_space=gym.spaces.Discrete(2)
            )
            # Note (Artur): We have to choose num_env_runners=0 here, because
            # otherwise RolloutWorker will be health-checked without an env which
            # raises an error. You could also disable the health-check here.
            .env_runners(num_env_runners=0)
            .build()
            .get_policy()
        )
        ckpt_dir = "/tmp/test_ckpt"
        policy.export_checkpoint(ckpt_dir)

        # Create a new policy from the checkpoint.
        new_policy = Policy.from_checkpoint(ckpt_dir)

        # check that the new policy has the same nested observation space
        space = new_policy.observation_space
        for i in range(10):
            self.assertEqual(space.original_space, gym.spaces.Discrete(2))
            space = space.original_space


if __name__ == "__main__":
    import sys

    import pytest

    # One can specify the specific TestCase class to run.
    # None for all unittest.TestCase classes in this file.
    class_ = sys.argv[1] if len(sys.argv) > 1 else None
    sys.exit(pytest.main(["-v", __file__ + ("" if class_ is None else "::" + class_)]))
TestPolicyFromCheckpoint
python
pyqtgraph__pyqtgraph
pyqtgraph/widgets/DiffTreeWidget.py
{ "start": 153, "end": 5832 }
class ____(QtWidgets.QWidget):
    """
    Widget for displaying differences between hierarchical python data structures
    (eg, nested dicts, lists, and arrays).

    Two DataTreeWidgets are shown side by side; mismatching nodes are
    highlighted in light red.
    """

    def __init__(self, parent=None, a=None, b=None):
        QtWidgets.QWidget.__init__(self, parent)
        self.layout = QtWidgets.QHBoxLayout()
        self.setLayout(self.layout)
        # Left tree shows *a*, right tree shows *b*.
        self.trees = [DataTreeWidget(self), DataTreeWidget(self)]
        for t in self.trees:
            self.layout.addWidget(t)

        if a is not None:
            self.setData(a, b)

    def setData(self, a, b):
        """
        Set the data to be compared in this widget.
        """
        self.data = (a, b)
        self.trees[0].setData(a)
        self.trees[1].setData(b)

        return self.compare(a, b)

    def compare(self, a, b, path=()):
        """
        Compare data structure *a* to structure *b*, recursively highlighting
        differences in both trees.

        NOTE(review): despite the historical docstring, this method colors tree
        nodes as a side effect and does not return a diff structure.
        """
        bad = (255, 200, 200)

        # generate typestr, desc, childs for each object
        typeA, descA, childsA, _ = self.trees[0].parse(a)
        typeB, descB, childsB, _ = self.trees[1].parse(b)

        if typeA != typeB:
            self.setColor(path, 1, bad)
        if descA != descB:
            self.setColor(path, 2, bad)

        if isinstance(a, dict) and isinstance(b, dict):
            keysA = set(a.keys())
            keysB = set(b.keys())
            # Keys present on only one side are highlighted only in that tree.
            for key in keysA - keysB:
                self.setColor(path+(key,), 0, bad, tree=0)
            for key in keysB - keysA:
                self.setColor(path+(key,), 0, bad, tree=1)
            for key in keysA & keysB:
                self.compare(a[key], b[key], path+(key,))

        elif isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
            for i in range(max(len(a), len(b))):
                if len(a) <= i:
                    self.setColor(path+(i,), 0, bad, tree=1)
                elif len(b) <= i:
                    self.setColor(path+(i,), 0, bad, tree=0)
                else:
                    self.compare(a[i], b[i], path+(i,))

        elif isinstance(a, np.ndarray) and isinstance(b, np.ndarray) and a.shape == b.shape:
            tableNodes = [tree.nodes[path].child(0) for tree in self.trees]
            if a.dtype.fields is None and b.dtype.fields is None:
                eq = self.compareArrays(a, b)
                if not np.all(eq):
                    for n in tableNodes:
                        n.setBackground(0, fn.mkBrush(bad))
                #for i in np.argwhere(~eq):

            else:
                if a.dtype == b.dtype:
                    for i,k in enumerate(a.dtype.fields.keys()):
                        eq = self.compareArrays(a[k], b[k])
                        if not np.all(eq):
                            for n in tableNodes:
                                n.setBackground(0, fn.mkBrush(bad))
                        #for j in np.argwhere(~eq):

        # dict: compare keys, then values where keys match
        # list: 
        # array: compare elementwise for same shape

    def compareArrays(self, a, b):
        """Return a boolean array marking where *a* and *b* agree (NaN-aware)."""
        intnan = -9223372036854775808  # happens when np.nan is cast to int
        anans = np.isnan(a) | (a == intnan)
        bnans = np.isnan(b) | (b == intnan)
        eq = anans == bnans
        mask = ~anans
        # NOTE(review): np.allclose returns a single scalar, so all non-NaN
        # positions get the same pass/fail flag — per-element granularity is
        # lost here; confirm whether np.isclose was intended.
        eq[mask] = np.allclose(a[mask], b[mask])
        return eq

    def setColor(self, path, column, color, tree=None):
        """Set the background *color* of the node at *path* / *column*.

        If *tree* is None, both trees are colored; otherwise only trees[tree].
        """
        brush = fn.mkBrush(color)

        # Color only one tree if specified.
        if tree is None:
            trees = self.trees
        else:
            trees = [self.trees[tree]]

        for tree in trees:
            item = tree.nodes[path]
            item.setBackground(column, brush)

    def _compare(self, a, b):
        """
        Assert that data structures *a* and *b* are equal, recursing into
        dicts, lists and (possibly structured) numpy arrays.

        Raises AssertionError on any mismatch, NotImplementedError for
        uncomparable types.

        FIX: this method previously referenced undefined names ``info`` /
        ``expect`` and a nonexistent ``compare_results`` method, so any call
        raised NameError; it now operates on its actual parameters.
        """
        # Check test structures are the same
        assert type(a) is type(b)
        if hasattr(a, '__len__'):
            assert len(a) == len(b)

        if isinstance(a, dict):
            for k in a:
                assert k in b
            for k in b:
                assert k in a
                self._compare(a[k], b[k])
        elif isinstance(a, list):
            for i in range(len(a)):
                self._compare(a[i], b[i])
        elif isinstance(a, np.ndarray):
            assert a.shape == b.shape
            assert a.dtype == b.dtype
            if a.dtype.fields is None:
                intnan = -9223372036854775808  # happens when np.nan is cast to int
                inans = np.isnan(a) | (a == intnan)
                enans = np.isnan(b) | (b == intnan)
                assert np.all(inans == enans)
                mask = ~inans
                assert np.allclose(a[mask], b[mask])
            else:
                for k in a.dtype.fields.keys():
                    self._compare(a[k], b[k])
        else:
            try:
                assert a == b
            except Exception:
                raise NotImplementedError("Cannot compare objects of type %s" % type(a))
DiffTreeWidget
python
Lightning-AI__lightning
tests/tests_pytorch/callbacks/test_early_stopping.py
{ "start": 19236, "end": 19350 }
class ____(BoringModel): def on_validation_epoch_end(self): self.log("val_loss", 10.0)
ModelWithHighLoss
python
django-guardian__django-guardian
guardian/managers.py
{ "start": 6297, "end": 6459 }
class ____(BaseObjectPermissionManager): """ See Also: `guardian.managers.UserObjectPermissionManager` """ pass
UserObjectPermissionManager
python
huggingface__transformers
src/transformers/utils/quantization_config.py
{ "start": 9718, "end": 14162 }
class ____(QuantizationConfigMixin):
    """
    This is wrapper around hqq's BaseQuantizeConfig.

    Args:
        nbits (`int`, *optional*, defaults to 4):
            Number of bits. Supported values are (8, 4, 3, 2, 1).
        group_size (`int`, *optional*, defaults to 64):
            Group-size value. Supported values are any value that is divisible by weight.shape[axis]).
        view_as_float (`bool`, *optional*, defaults to `False`):
            View the quantized weight as float (used in distributed training) if set to `True`.
        axis (`Optional[int]`, *optional*):
            Axis along which grouping is performed. Supported values are 0 or 1.
        dynamic_config (dict, *optional*):
            Parameters for dynamic configuration. The key is the name tag of the layer and the value is a
            quantization config. If set, each layer specified by its id will use its dedicated quantization
            configuration.
        skip_modules (`list[str]`, *optional*, defaults to `["lm_head"]`):
            List of `nn.Linear` layers to skip.
        kwargs (`dict[str, Any]`, *optional*):
            Additional parameters from which to initialize the configuration object.
    """

    def __init__(
        self,
        nbits: int = 4,
        group_size: int = 64,
        view_as_float: bool = False,
        axis: int | None = None,
        dynamic_config: dict | None = None,
        skip_modules: list[str] | None = None,
        **kwargs,
    ):
        if is_hqq_available():
            from hqq.core.quantize import BaseQuantizeConfig as HQQBaseQuantizeConfig
        else:
            raise ImportError(
                "A valid HQQ version (>=0.2.1) is not available. Please follow the instructions to install it: `https://github.com/mobiusml/hqq/`."
            )

        # FIX: a literal list default (`skip_modules=["lm_head"]`) is a shared
        # mutable object aliased across every instance; resolve it per call.
        if skip_modules is None:
            skip_modules = ["lm_head"]

        if axis is None:
            axis = 1
            logger.info("Setting axis=1 as faster backends such as TorchAO or BitBlas are only compatible with it.")

        if axis not in [0, 1]:
            raise ValueError("Invalid axis value. Only 0 and 1 are allowed.")

        if dynamic_config is not None:
            # Per-layer override: each key is a layer name tag, each value a
            # dict of HQQ quantization parameters.
            self.quant_config = {}
            for key in dynamic_config:
                self.quant_config[key] = HQQBaseQuantizeConfig(**dynamic_config[key])
        else:
            self.quant_config = HQQBaseQuantizeConfig(
                nbits=nbits, group_size=group_size, view_as_float=view_as_float, axis=axis
            )

        self.quant_method = QuantizationMethod.HQQ
        self.skip_modules = skip_modules

        self.post_init()

    def post_init(self):
        r"""
        Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
        """

    @classmethod
    def from_dict(cls, config: dict[str, Any]):
        """
        Override from_dict, used in AutoQuantizationConfig.from_dict in quantizers/auto.py
        """
        instance = cls()
        instance.quant_config = config["quant_config"]
        instance.skip_modules = config["skip_modules"]
        return instance

    def to_dict(self) -> dict[str, Any]:
        """
        Serializes this instance to a Python dictionary.

        Returns:
            `dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        return {
            "quant_config": self.quant_config,
            "quant_method": self.quant_method,
            "skip_modules": self.skip_modules,
        }

    def __repr__(self):
        config_dict = self.to_dict()
        return f"{self.__class__.__name__} {json.dumps(config_dict, indent=2, sort_keys=True)}\n"

    def to_diff_dict(self) -> dict[str, Any]:
        """
        Removes all attributes from config which correspond to the default config attributes for better readability and
        serializes to a Python dictionary.

        Returns:
            `dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,
        """
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = HqqConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
HqqConfig
python
great-expectations__great_expectations
docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py
{ "start": 4983, "end": 15901 }
class ____(ColumnMapExpectation):
    # </snippet>
    # <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py docstring">
    """Expect values in this column to equal 3."""

    # </snippet>

    # These examples will be shown in the public gallery.
    # They will also be executed as unit tests for your Expectation.
    # <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py examples">
    examples = [
        {
            "data": {
                "all_threes": [3, 3, 3, 3, 3],
                "some_zeroes": [3, 3, 3, 0, 0],
            },
            "tests": [
                {
                    "title": "basic_positive_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "all_threes"},
                    "out": {
                        "success": True,
                    },
                },
                {
                    "title": "basic_negative_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "some_zeroes", "mostly": 0.8},
                    "out": {
                        "success": False,
                    },
                },
            ],
        }
    ]
    # </snippet>

    # This is the id string of the Metric used by this Expectation.
    # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
    # <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py map_metric">
    map_metric = "column_values.equal_three"
    # </snippet>

    # This is a list of parameter names that can affect whether the Expectation evaluates to True or False
    # Please see https://docs.greatexpectations.io/en/latest/reference/core_concepts/expectations/expectations.html#expectation-concepts-domain-and-success-keys
    # for more information about domain and success keys, and other arguments to Expectations
    success_keys = ("mostly",)

    @renderer(renderer_type="renderer.diagnostic.observed_value")
    @render_suite_parameter_string
    def _diagnostic_observed_value_renderer(
        cls,
        configuration: ExpectationConfiguration = None,
        result: ExpectationValidationResult = None,
        runtime_configuration: Optional[dict] = None,
        **kwargs,
    ):
        """Render the observed value (or unexpected percent) for diagnostics."""
        assert result, "Must provide a result object."
        result_dict = result.result
        if result_dict is None:
            return "--"

        if result_dict.get("observed_value"):
            observed_value = result_dict.get("observed_value")
            # bool is an int subclass; exclude it from numeric formatting.
            if isinstance(observed_value, (int, float)) and not isinstance(
                observed_value, bool
            ):
                return num_to_str(observed_value, precision=10, use_locale=True)
            return str(observed_value)
        elif result_dict.get("unexpected_percent") is not None:
            return (
                num_to_str(result_dict.get("unexpected_percent"), precision=5)
                + "% unexpected"
            )
        else:
            return "--"

    @renderer(renderer_type="renderer.diagnostic.unexpected_statement")
    @render_suite_parameter_string
    def _diagnostic_unexpected_statement_renderer(
        cls,
        configuration: ExpectationConfiguration = None,
        result: ExpectationValidationResult = None,
        runtime_configuration: Optional[dict] = None,
        **kwargs,
    ):
        """Render an exception message or an unexpected-count summary."""
        assert result, "Must provide a result object."
        success = result.success
        # FIX: previously `result = result.result` clobbered the validation
        # result object, breaking the `result.exception_info` /
        # `result.expectation_config` accesses below, and `result_dict` was
        # referenced without ever being defined (NameError). Bind the dict to
        # its own name instead.
        result_dict = result.result

        if result.exception_info["raised_exception"]:
            exception_message_template_str = (
                "\n\n$expectation_type raised an exception:\n$exception_message"
            )

            exception_message = RenderedStringTemplateContent(
                **{
                    "content_block_type": "string_template",
                    "string_template": {
                        "template": exception_message_template_str,
                        "params": {
                            "expectation_type": result.expectation_config.type,
                            "exception_message": result.exception_info[
                                "exception_message"
                            ],
                        },
                        "tag": "strong",
                        "styling": {
                            "classes": ["text-danger"],
                            "params": {
                                "exception_message": {"tag": "code"},
                                "expectation_type": {
                                    "classes": ["badge", "badge-danger", "mb-2"]
                                },
                            },
                        },
                    },
                }
            )

            exception_traceback_collapse = CollapseContent(
                **{
                    "collapse_toggle_link": "Show exception traceback...",
                    "collapse": [
                        RenderedStringTemplateContent(
                            **{
                                "content_block_type": "string_template",
                                "string_template": {
                                    "template": result.exception_info[
                                        "exception_traceback"
                                    ],
                                    "tag": "code",
                                },
                            }
                        )
                    ],
                }
            )

            return [exception_message, exception_traceback_collapse]

        if success or not result_dict.get("unexpected_count"):
            return []
        else:
            unexpected_count = num_to_str(
                result_dict["unexpected_count"], use_locale=True, precision=20
            )
            unexpected_percent = (
                num_to_str(result_dict["unexpected_percent"], precision=4) + "%"
            )
            element_count = num_to_str(
                result_dict["element_count"], use_locale=True, precision=20
            )

            template_str = (
                "\n\n$unexpected_count unexpected values found. "
                "$unexpected_percent of $element_count total rows."
            )

            return [
                RenderedStringTemplateContent(
                    **{
                        "content_block_type": "string_template",
                        "string_template": {
                            "template": template_str,
                            "params": {
                                "unexpected_count": unexpected_count,
                                "unexpected_percent": unexpected_percent,
                                "element_count": element_count,
                            },
                            "tag": "strong",
                            "styling": {"classes": ["text-danger"]},
                        },
                    }
                )
            ]

    @renderer(renderer_type="renderer.diagnostic.unexpected_table")
    @render_suite_parameter_string
    def _diagnostic_unexpected_table_renderer(  # noqa: C901, PLR0912
        cls,
        configuration: ExpectationConfiguration = None,
        result: ExpectationValidationResult = None,
        runtime_configuration: Optional[dict] = None,
        **kwargs,
    ):
        """Render a table of (sampled) unexpected values, or None if there are none."""
        try:
            result_dict = result.result
        except KeyError:
            return None

        if result_dict is None:
            return None

        if not result_dict.get("partial_unexpected_list") and not result_dict.get(
            "partial_unexpected_counts"
        ):
            return None

        table_rows = []

        if result_dict.get("partial_unexpected_counts"):
            total_count = 0
            for unexpected_count_dict in result_dict.get("partial_unexpected_counts"):
                value = unexpected_count_dict.get("value")
                count = unexpected_count_dict.get("count")
                total_count += count
                if value is not None and value != "":
                    table_rows.append([value, count])
                elif value == "":
                    table_rows.append(["EMPTY", count])
                else:
                    table_rows.append(["null", count])

            # Only show counts if we have seen every unexpected value; otherwise
            # the counts are a partial sample and would be misleading.
            if total_count == result_dict.get("unexpected_count"):
                header_row = ["Unexpected Value", "Count"]
            else:
                header_row = ["Sampled Unexpected Values"]
                table_rows = [[row[0]] for row in table_rows]
        else:
            header_row = ["Sampled Unexpected Values"]
            sampled_values_set = set()
            for unexpected_value in result_dict.get("partial_unexpected_list"):
                if unexpected_value:
                    string_unexpected_value = str(unexpected_value)
                elif unexpected_value == "":
                    string_unexpected_value = "EMPTY"
                else:
                    string_unexpected_value = "null"
                if string_unexpected_value not in sampled_values_set:
                    table_rows.append([unexpected_value])
                    sampled_values_set.add(string_unexpected_value)

        unexpected_table_content_block = RenderedTableContent(
            **{
                "content_block_type": "table",
                "table": table_rows,
                "header_row": header_row,
                "styling": {
                    "body": {"classes": ["table-bordered", "table-sm", "mt-3"]}
                },
            }
        )

        return unexpected_table_content_block

    # This dictionary contains metadata for display in the public gallery
    # <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py library_metadata">
    library_metadata = {
        "tags": ["extremely basic math"],
        "contributors": ["@joegargery"],
    }
    # </snippet>


if __name__ == "__main__":
    # <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py diagnostics">
    ExpectColumnValuesToEqualThree().print_diagnostic_checklist()
    # </snippet>

# Note to users: code below this line is only for integration testing -- ignore!

diagnostics = ExpectColumnValuesToEqualThree().run_diagnostics()
for check in diagnostics["tests"]:
    assert check["test_passed"] is True
    assert check["error_diagnostics"] is None

for check in diagnostics["errors"]:
    assert check is None

for check in diagnostics["maturity_checklist"]["experimental"]:
    if check["message"] == "Passes all linting checks":
        continue
    assert check["passed"] is True
ExpectColumnValuesToEqualThree
python
dask__dask
dask/dataframe/io/demo.py
{ "start": 2989, "end": 3452 }
class ____: """Properties of the dataframe DatetimeIndex Notes ----- This API is still experimental, and will likely change in the future""" dtype: str | type = int """Index dtype""" start: str | None = None """First value of the index""" freq: str = "1H" """Frequency for the index ("1H", "1D", etc.)""" partition_freq: str | None = None """Partition frequency ("1D", "1M", etc.)""" @dataclass
DatetimeIndexSpec
python
readthedocs__readthedocs.org
readthedocs/embed/tests/test_api.py
{ "start": 419, "end": 1387 }
class ____: @pytest.fixture(autouse=True) def setup_method(self, settings): self.project = get( Project, language="en", main_language_project=None, slug="project", privacy_level=PUBLIC, ) self.version = self.project.versions.get(slug=LATEST) self.version.privacy_level = PUBLIC self.version.save() settings.PUBLIC_DOMAIN = "readthedocs.io" settings.RTD_DEFAULT_FEATURES = dict( [RTDProductFeature(TYPE_EMBED_API).to_item()] ) def get(self, client, *args, **kwargs): """Wrapper around ``client.get`` to be overridden in the proxied api tests.""" return client.get(*args, **kwargs) def test_is_deprecated(self, client): response = self.get( client=client, path=reverse("embed_api"), ) assert response.status_code == status.HTTP_410_GONE
BaseTestEmbedAPI
python
facebookresearch__faiss
tests/torch_test_contrib.py
{ "start": 14082, "end": 14865 }
class ____(unittest.TestCase): def test_python_product_quantization(self): """ Test the python implementation of product quantization """ d = 64 n = 10000 cs = 4 nbits = 8 M = 4 x = np.random.random(size=(n, d)).astype('float32') pq = faiss.ProductQuantizer(d, cs, nbits) pq.train(x) codes = pq.compute_codes(x) x2 = pq.decode(codes) diff = ((x - x2)**2).sum() # vs pure pytorch impl xt = torch.from_numpy(x) my_pq = quantization.ProductQuantizer(d, M, nbits) my_pq.train(xt) my_codes = my_pq.encode(xt) xt2 = my_pq.decode(my_codes) my_diff = ((xt - xt2)**2).sum() self.assertLess(abs(diff - my_diff), 100)
TestQuantization
python
dagster-io__dagster
python_modules/dagster/dagster/_core/errors.py
{ "start": 21590, "end": 21732 }
class ____(DagsterError): """Indicates you attempted to create a pipeline run with a nonexistent snapshot id."""
DagsterSnapshotDoesNotExist
python
pypa__warehouse
tests/unit/admin/views/test_users.py
{ "start": 19777, "end": 23053 }
class ____:
    """Tests for the admin ``user_reset_password`` view."""

    def test_resets_password(self, db_request, monkeypatch):
        # Matching confirmation username: password is disabled, the reset email
        # is sent, and the view redirects back to the user detail page.
        user = UserFactory.create()
        db_request.matchdict["username"] = str(user.username)
        db_request.params = {"username": user.username}
        db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foobar")
        db_request.user = UserFactory.create()
        service = pretend.stub(
            find_userid=pretend.call_recorder(lambda username: user.username),
            disable_password=pretend.call_recorder(
                lambda userid, request, reason: None
            ),
        )
        db_request.find_service = pretend.call_recorder(lambda iface, context: service)
        send_email = pretend.call_recorder(lambda *a, **kw: None)
        monkeypatch.setattr(views, "send_password_reset_by_admin_email", send_email)

        result = views.user_reset_password(user, db_request)

        assert db_request.find_service.calls == [
            pretend.call(IUserService, context=None)
        ]
        assert send_email.calls == [pretend.call(db_request, user)]
        assert service.disable_password.calls == [
            pretend.call(user.id, db_request, reason=DisableReason.AdminInitiated)
        ]
        assert db_request.route_path.calls == [
            pretend.call("admin.user.detail", username=user.username)
        ]
        assert result.status_code == 303
        assert result.location == "/foobar"

    def test_resets_password_bad_confirm(self, db_request, monkeypatch):
        # Wrong confirmation username: nothing is disabled, no email is sent,
        # and the view still redirects to the user detail page.
        user = UserFactory.create()
        db_request.matchdict["username"] = str(user.username)
        db_request.params = {"username": "wrong"}
        db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foobar")
        db_request.user = UserFactory.create()
        service = pretend.stub(
            find_userid=pretend.call_recorder(lambda username: user.username),
            disable_password=pretend.call_recorder(lambda userid, reason: None),
        )
        db_request.find_service = pretend.call_recorder(lambda iface, context: service)
        send_email = pretend.call_recorder(lambda *a, **kw: None)
        monkeypatch.setattr(views, "send_password_reset_by_admin_email", send_email)

        result = views.user_reset_password(user, db_request)

        assert db_request.find_service.calls == []
        assert send_email.calls == []
        assert service.disable_password.calls == []
        assert db_request.route_path.calls == [
            pretend.call("admin.user.detail", username=user.username)
        ]
        assert result.status_code == 303
        assert result.location == "/foobar"

    def test_user_reset_password_redirects_actual_name(self, db_request):
        # Username lookup is case-insensitive; a mismatched-case URL issues a
        # permanent redirect to the canonical username.
        user = UserFactory.create(username="wu-tang")
        db_request.matchdict["username"] = "Wu-Tang"
        db_request.current_route_path = pretend.call_recorder(
            lambda username: "/user/the-redirect/"
        )

        result = views.user_reset_password(user, db_request)

        assert isinstance(result, HTTPMovedPermanently)
        assert result.headers["Location"] == "/user/the-redirect/"
        assert db_request.current_route_path.calls == [
            pretend.call(username=user.username)
        ]
TestUserResetPassword
python
facebookresearch__faiss
benchs/distributed_ondisk/combined_index.py
{ "start": 247, "end": 4914 }
class ____: """ combines a set of inverted lists into a hstack masks part of those lists adds these inverted lists to an empty index that contains the info on how to perform searches """ def __init__(self, invlist_fnames, empty_index_fname, masked_index_fname=None): self.indexes = indexes = [] ilv = faiss.InvertedListsPtrVector() for fname in invlist_fnames: if os.path.exists(fname): print('reading', fname, end='\r', flush=True) index = faiss.read_index(fname) indexes.append(index) il = faiss.extract_index_ivf(index).invlists else: raise AssertionError ilv.push_back(il) print() self.big_il = faiss.VStackInvertedLists(ilv.size(), ilv.data()) if masked_index_fname: self.big_il_base = self.big_il print('loading', masked_index_fname) self.masked_index = faiss.read_index( masked_index_fname, faiss.IO_FLAG_MMAP | faiss.IO_FLAG_READ_ONLY) self.big_il = faiss.MaskedInvertedLists( faiss.extract_index_ivf(self.masked_index).invlists, self.big_il_base) print('loading empty index', empty_index_fname) self.index = faiss.read_index(empty_index_fname) ntotal = self.big_il.compute_ntotal() print('replace invlists') index_ivf = faiss.extract_index_ivf(self.index) index_ivf.replace_invlists(self.big_il, False) index_ivf.ntotal = self.index.ntotal = ntotal index_ivf.parallel_mode = 1 # seems reasonable to do this all the time quantizer = faiss.downcast_index(index_ivf.quantizer) quantizer.hnsw.efSearch = 1024 ############################################################ # Expose fields and functions of the index as methods so that they # can be called by RPC def search(self, x, k): return self.index.search(x, k) def range_search(self, x, radius): return self.index.range_search(x, radius) def transform_and_assign(self, xq): index = self.index if isinstance(index, faiss.IndexPreTransform): assert index.chain.size() == 1 vt = index.chain.at(0) xq = vt.apply_py(xq) # perform quantization index_ivf = faiss.extract_index_ivf(index) quantizer = index_ivf.quantizer coarse_dis, list_nos = 
quantizer.search(xq, index_ivf.nprobe) return xq, list_nos, coarse_dis def ivf_search_preassigned(self, xq, list_nos, coarse_dis, k): index_ivf = faiss.extract_index_ivf(self.index) n, d = xq.shape assert d == index_ivf.d n2, d2 = list_nos.shape assert list_nos.shape == coarse_dis.shape assert n2 == n assert d2 == index_ivf.nprobe D = np.empty((n, k), dtype='float32') I = np.empty((n, k), dtype='int64') index_ivf.search_preassigned( n, faiss.swig_ptr(xq), k, faiss.swig_ptr(list_nos), faiss.swig_ptr(coarse_dis), faiss.swig_ptr(D), faiss.swig_ptr(I), False) return D, I def ivf_range_search_preassigned(self, xq, list_nos, coarse_dis, radius): index_ivf = faiss.extract_index_ivf(self.index) n, d = xq.shape assert d == index_ivf.d n2, d2 = list_nos.shape assert list_nos.shape == coarse_dis.shape assert n2 == n assert d2 == index_ivf.nprobe res = faiss.RangeSearchResult(n) index_ivf.range_search_preassigned( n, faiss.swig_ptr(xq), radius, faiss.swig_ptr(list_nos), faiss.swig_ptr(coarse_dis), res) lims = faiss.rev_swig_ptr(res.lims, n + 1).copy() nd = int(lims[-1]) D = faiss.rev_swig_ptr(res.distances, nd).copy() I = faiss.rev_swig_ptr(res.labels, nd).copy() return lims, D, I def set_nprobe(self, nprobe): index_ivf = faiss.extract_index_ivf(self.index) index_ivf.nprobe = nprobe def set_parallel_mode(self, pm): index_ivf = faiss.extract_index_ivf(self.index) index_ivf.parallel_mode = pm def get_ntotal(self): return self.index.ntotal def set_prefetch_nthread(self, nt): for idx in self.indexes: il = faiss.downcast_InvertedLists( faiss.extract_index_ivf(idx).invlists) il.prefetch_nthread il.prefetch_nthread = nt def set_omp_num_threads(self, nt): faiss.omp_set_num_threads(nt)
CombinedIndex
python
readthedocs__readthedocs.org
readthedocs/oauth/tasks.py
{ "start": 9958, "end": 29237 }
class ____: """ Handle GitHub App webhooks. All event handlers try to create the installation object if it doesn't exist in our database, except for events related to the installation being deleted or suspended. This guarantees that our application can easily recover if we missed an event in case our application or GitHub were down. """ def __init__(self, data: dict, event: str): self.data = data self.event = event self.event_handlers = { "installation": self._handle_installation_event, "installation_repositories": self._handle_installation_repositories_event, "installation_target": self._handle_installation_target_event, "push": self._handle_push_event, "pull_request": self._handle_pull_request_event, "repository": self._handle_repository_event, "organization": self._handle_organization_event, "member": self._handle_member_event, "github_app_authorization": self._handle_github_app_authorization_event, } @cached_property def gh_app_client(self): return get_gh_app_client() def handle(self): # Most of the events have an installation object and action. installation_id = self.data.get("installation", {}).get("id", "unknown") action = self.data.get("action", "unknown") structlog.contextvars.bind_contextvars( installation_id=installation_id, action=action, event=self.event, ) if self.event not in self.event_handlers: log.debug("Unsupported event") raise ValueError(f"Unsupported event: {self.event}") log.info("Handling event") self.event_handlers[self.event]() def _handle_installation_event(self): """ Handle the installation event. Triggered when a user installs or uninstalls the GitHub App under an account (user or organization). We create the installation object and sync the repositories, or delete the installation accordingly. Payload example: .. 
code-block:: json { "action": "created", "installation": { "id": 1234, "client_id": "12345", "account": { "login": "user", "id": 12345, "type": "User" }, "repository_selection": "selected", "html_url": "https://github.com/settings/installations/1234", "app_id": 12345, "app_slug": "app-slug", "target_id": 12345, "target_type": "User", "permissions": { "contents": "read", "metadata": "read", "pull_requests": "write", "statuses": "write" }, "events": [ "create", "delete", "public", "pull_request", "push" ], "created_at": "2025-01-29T12:04:11.000-05:00", "updated_at": "2025-01-29T12:04:12.000-05:00", "single_file_name": null, "has_multiple_single_files": false, "single_file_paths": [], "suspended_by": null, "suspended_at": null }, "repositories": [ { "id": 770738492, "name": "test-public", "full_name": "user/test-public", "private": false }, { "id": 917825438, "name": "test-private", "full_name": "user/test-private", "private": true } ], "requester": null, "sender": { "login": "user", "id": 1234, "type": "User" } } See https://docs.github.com/en/webhooks/webhook-events-and-payloads#installation. """ action = self.data["action"] gh_installation = self.data["installation"] installation_id = gh_installation["id"] if action in ["created", "unsuspended"]: installation, created = self._get_or_create_installation() # If the installation was just created, we already synced the repositories. if created: return installation.service.sync() if action in ["deleted", "suspended"]: # NOTE: When an installation is deleted/suspended, it doesn't trigger an installation_repositories event. # So we need to call the delete method explicitly here, so we delete its repositories. installation = GitHubAppInstallation.objects.filter( installation_id=installation_id ).first() if installation: installation.delete() log.info("Installation deleted") else: log.info("Installation not found") return # Ignore other actions: # - new_permissions_accepted: We don't need to do anything here for now. 
return def _handle_installation_repositories_event(self): """ Handle the installation_repositories event. Triggered when a repository is added or removed from an installation. If the installation had access to a repository, and the repository is deleted, this event will be triggered. When a repository is deleted, we delete its remote repository object, but only if it's not linked to any project. Payload example: .. code-block:: json { "action": "added", "installation": { "id": 1234, "client_id": "1234", "account": { "login": "user", "id": 12345, "type": "User" }, "repository_selection": "selected", "html_url": "https://github.com/settings/installations/60240360", "app_id": 12345, "app_slug": "app-slug", "target_id": 12345, "target_type": "User", "permissions": { "contents": "read", "metadata": "read", "pull_requests": "write", "statuses": "write" }, "events": ["create", "delete", "public", "pull_request", "push"], "created_at": "2025-01-29T12:04:11.000-05:00", "updated_at": "2025-01-29T16:05:45.000-05:00", "single_file_name": null, "has_multiple_single_files": false, "single_file_paths": [], "suspended_by": null, "suspended_at": null }, "repository_selection": "selected", "repositories_added": [ { "id": 258664698, "name": "test-public", "full_name": "user/test-public", "private": false } ], "repositories_removed": [], "requester": null, "sender": { "login": "user", "id": 4975310, "type": "User" } } See https://docs.github.com/en/webhooks/webhook-events-and-payloads#installation_repositories. """ action = self.data["action"] installation, created = self._get_or_create_installation() # If we didn't have the installation, all repositories were synced on creation. 
if created: return if action == "added": if self.data["repository_selection"] == "all": installation.service.sync() else: installation.service.update_or_create_repositories( [repo["id"] for repo in self.data["repositories_added"]] ) return if action == "removed": installation.delete_repositories( [repo["id"] for repo in self.data["repositories_removed"]] ) return # NOTE: this should never happen. raise ValueError(f"Unsupported action: {action}") def _handle_installation_target_event(self): """ Handle the installation_target event. Triggered when the target of an installation changes, like when the user or organization changes its username/slug. .. note:: Looks like this is only triggered when a username is changed, when an organization is renamed, it doesn't trigger this event (maybe a bug?). When this happens, we re-sync all the repositories, so they use the new name. See https://docs.github.com/en/webhooks/webhook-events-and-payloads#installation_target. """ installation, created = self._get_or_create_installation() # If we didn't have the installation, all repositories were synced on creation. if created: return installation.service.sync() def _handle_repository_event(self): """ Handle the repository event. Triggered when a repository is created, deleted, or updated. See https://docs.github.com/en/webhooks/webhook-events-and-payloads#repository. """ action = self.data["action"] installation, created = self._get_or_create_installation() # If the installation was just created, we already synced the repositories. if created: return if action in ("edited", "privatized", "publicized", "renamed", "transferred"): installation.service.update_or_create_repositories([self.data["repository"]["id"]]) return # Ignore other actions: # - created: If the user granted access to all repositories, # GH will trigger an installation_repositories event. # - deleted: If the repository was linked to an installation, # GH will be trigger an installation_repositories event. 
# - archived/unarchived: We don't do anything with archived repositories. return def _handle_push_event(self): """ Handle the push event. Triggered when a commit is pushed (including a new branch or tag is created), when a branch or tag is deleted, or when a repository is created from a template. If a new branch or tag is created, we trigger a sync of the versions, if the version already exists, we build it if it's active. See https://docs.github.com/en/webhooks/webhook-events-and-payloads#push. """ created = self.data.get("created", False) deleted = self.data.get("deleted", False) if created or deleted: for project in self._get_projects(): trigger_sync_versions(project) return # If this is a push to an existing branch or tag, # we need to build the version if active. version_name, version_type = parse_version_from_ref(self.data["ref"]) for project in self._get_projects(): build_versions_from_names(project, [VersionInfo(name=version_name, type=version_type)]) def _handle_pull_request_event(self): """ Handle the pull_request event. Triggered when there is activity on a pull request. See https://docs.github.com/en/webhooks/webhook-events-and-payloads#pull_request. """ action = self.data["action"] pr = self.data["pull_request"] external_version_data = ExternalVersionData( id=str(pr["number"]), commit=pr["head"]["sha"], source_branch=pr["head"]["ref"], base_branch=pr["base"]["ref"], ) if action in ("opened", "reopened", "synchronize"): for project in self._get_projects().filter(external_builds_enabled=True): external_version = get_or_create_external_version( project=project, version_data=external_version_data, ) build_external_version(project, external_version) return if action == "closed": # Queue the external version for deletion. for project in self._get_projects(): close_external_version( project=project, version_data=external_version_data, ) return # We don't care about the other actions. 
return def _handle_organization_event(self): """ Handle the organization event. Triggered when an member is added or removed from an organization, or when the organization is renamed or deleted. See https://docs.github.com/en/webhooks/webhook-events-and-payloads#organization. """ action = self.data["action"] installation, created = self._get_or_create_installation() # If the installation was just created, we already synced the repositories and organization. if created: return # We need to do a full sync of the repositories if members were added or removed, # this is since we don't know to which repositories the members have access. # GH doesn't send a member event for this. if action in ("member_added", "member_removed"): installation.service.sync() return # NOTE: installation_target should handle this instead? # But I wasn't able to trigger neither of those events when renaming an organization. # Maybe a bug? # If the organization is renamed, we need to sync the repositories, so they use the new name. if action == "renamed": installation.service.sync() return if action == "deleted": # Delete the organization only if it's not linked to any project. # GH sends a repository and installation_repositories events for each repository # when the organization is deleted. # I didn't see GH send the deleted action for the organization event... # But handle it just in case. installation.delete_organization(self.data["organization"]["id"]) return # Ignore other actions: # - member_invited: We don't do anything with invited members. return def _handle_member_event(self): """ Handle the member event. Triggered when a user is added or removed from a repository. See https://docs.github.com/en/webhooks/webhook-events-and-payloads#member. """ action = self.data["action"] installation, created = self._get_or_create_installation() # If we didn't have the installation, all repositories were synced on creation. 
if created: return if action in ("added", "edited", "removed"): # Sync collaborators installation.service.update_or_create_repositories([self.data["repository"]["id"]]) return # NOTE: this should never happen. raise ValueError(f"Unsupported action: {action}") def _handle_github_app_authorization_event(self): """ Handle the github_app_authorization event. Triggered when a user revokes the authorization of a GitHub App ("log in with GitHub" will no longer work). .. note:: Revoking the authorization of a GitHub App does not uninstall the GitHub App, it only revokes the OAuth2 token. See https://docs.github.com/en/webhooks/webhook-events-and-payloads#github_app_authorization. """ # A GitHub App receives this webhook by default and cannot unsubscribe from this event. # We don't need to do anything here for now. def _get_projects(self): """ Get all projects linked to the repository that triggered the event. .. note:: This should only be used for events that have a repository object. """ remote_repository = self._get_remote_repository() if not remote_repository: return Project.objects.none() return remote_repository.projects.all() def _get_remote_repository(self): """ Get the remote repository from the request data. If the repository doesn't exist, return None. .. note:: This should only be used for events that have a repository object. """ remote_id = self.data["repository"]["id"] installation, _ = self._get_or_create_installation() return installation.repositories.filter(remote_id=str(remote_id)).first() def _get_or_create_installation(self, sync_repositories_on_create: bool = True): """ Get or create the GitHub App installation. If the installation didn't exist, and `sync_repositories_on_create` is True, we sync the repositories. """ data = self.data.copy() # All webhook payloads should have an installation object. gh_installation = self.data["installation"] installation_id = gh_installation["id"] # These fields are not always present in all payloads. 
target_id = gh_installation.get("target_id") target_type = gh_installation.get("target_type") # If they aren't present, fetch them from the API, # so we can create the installation object if needed. if not target_id or not target_type: log.debug("Incomplete installation object, fetching from the API") gh_installation = self.gh_app_client.get_app_installation(installation_id) target_id = gh_installation.target_id target_type = gh_installation.target_type data["installation"] = gh_installation.raw_data ( installation, created, ) = GitHubAppInstallation.objects.get_or_create_installation( installation_id=installation_id, target_id=target_id, target_type=target_type, extra_data=data, ) if created and sync_repositories_on_create: installation.service.sync() return installation, created @app.task(queue="web") def handle_github_app_webhook(data: dict, event: str, event_id: str = "unknown"): """ Handle GitHub App webhooks asynchronously. :param data: The webhook payload data. :param event: The event type of the webhook. """ structlog.contextvars.bind_contextvars( event=event, event_id=event_id, ) log.info("Handling GitHub App webhook") handler = GitHubAppWebhookHandler(data, event) handler.handle()
GitHubAppWebhookHandler
python
django__django
django/core/management/base.py
{ "start": 23489, "end": 24904 }
class ____(BaseCommand): """ A management command which takes one or more arbitrary arguments (labels) on the command line, and does something with each of them. Rather than implementing ``handle()``, subclasses must implement ``handle_label()``, which will be called once for each label. If the arguments should be names of installed applications, use ``AppCommand`` instead. """ label = "label" missing_args_message = "Enter at least one %s." def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.missing_args_message == LabelCommand.missing_args_message: self.missing_args_message = self.missing_args_message % self.label def add_arguments(self, parser): parser.add_argument("args", metavar=self.label, nargs="+") def handle(self, *labels, **options): output = [] for label in labels: label_output = self.handle_label(label, **options) if label_output: output.append(label_output) return "\n".join(output) def handle_label(self, label, **options): """ Perform the command's actions for ``label``, which will be the string as given on the command line. """ raise NotImplementedError( "subclasses of LabelCommand must provide a handle_label() method" )
LabelCommand
python
pydantic__pydantic
tests/plugin/example_plugin.py
{ "start": 159, "end": 478 }
class ____: def on_enter(self, *args, **kwargs) -> None: log.append(f'on_enter args={args} kwargs={kwargs}') def on_success(self, result) -> None: log.append(f'on_success result={result}') def on_error(self, error) -> None: log.append(f'on_error error={error}')
ValidatePythonHandler
python
Textualize__textual
tests/snapshot_tests/snapshot_apps/loading.py
{ "start": 201, "end": 704 }
class ____(Widget): """A loading indicator that doesn't animate.""" DEFAULT_CSS = """ SimpleLoadingIndicator { width: 100%; height: 100%; min-height: 1; content-align: center middle; color: $secondary; } SimpleLoadingIndicator.-textual-loading-indicator { layer: _loading; background: $boost; dock: top; } """ def render(self) -> RenderableType: return "Loading!"
SimpleLoadingIndicator
python
Pylons__pyramid
tests/test_viewderivers.py
{ "start": 59641, "end": 63103 }
class ____(unittest.TestCase): def setUp(self): self.config = testing.setUp() def tearDown(self): self.config = None testing.tearDown() def test_right_order_user_sorted(self): from pyramid.interfaces import IViewDerivers self.config.add_view_deriver(None, 'deriv1') self.config.add_view_deriver( None, 'deriv2', 'decorated_view', 'deriv1' ) self.config.add_view_deriver(None, 'deriv3', 'deriv2', 'deriv1') derivers = self.config.registry.getUtility(IViewDerivers) derivers_sorted = derivers.sorted() dlist = [d for (d, _) in derivers_sorted] self.assertEqual( [ 'secured_view', 'csrf_view', 'owrapped_view', 'http_cached_view', 'decorated_view', 'deriv2', 'deriv3', 'deriv1', 'rendered_view', 'mapped_view', ], dlist, ) def test_right_order_implicit(self): from pyramid.interfaces import IViewDerivers self.config.add_view_deriver(None, 'deriv1') self.config.add_view_deriver(None, 'deriv2') self.config.add_view_deriver(None, 'deriv3') derivers = self.config.registry.getUtility(IViewDerivers) derivers_sorted = derivers.sorted() dlist = [d for (d, _) in derivers_sorted] self.assertEqual( [ 'secured_view', 'csrf_view', 'owrapped_view', 'http_cached_view', 'decorated_view', 'deriv3', 'deriv2', 'deriv1', 'rendered_view', 'mapped_view', ], dlist, ) def test_right_order_under_rendered_view(self): from pyramid.interfaces import IViewDerivers self.config.add_view_deriver( None, 'deriv1', 'rendered_view', 'mapped_view' ) derivers = self.config.registry.getUtility(IViewDerivers) derivers_sorted = derivers.sorted() dlist = [d for (d, _) in derivers_sorted] self.assertEqual( [ 'secured_view', 'csrf_view', 'owrapped_view', 'http_cached_view', 'decorated_view', 'rendered_view', 'deriv1', 'mapped_view', ], dlist, ) def test_right_order_under_rendered_view_others(self): from pyramid.interfaces import IViewDerivers self.config.add_view_deriver( None, 'deriv1', 'rendered_view', 'mapped_view' ) self.config.add_view_deriver(None, 'deriv2') self.config.add_view_deriver(None, 'deriv3') derivers = 
self.config.registry.getUtility(IViewDerivers) derivers_sorted = derivers.sorted() dlist = [d for (d, _) in derivers_sorted] self.assertEqual( [ 'secured_view', 'csrf_view', 'owrapped_view', 'http_cached_view', 'decorated_view', 'deriv3', 'deriv2', 'rendered_view', 'deriv1', 'mapped_view', ], dlist, )
TestDerivationOrder
python
spyder-ide__spyder
spyder/utils/syntaxhighlighters.py
{ "start": 28265, "end": 29069 }
class ____(PythonSH): """Cython Syntax Highlighter""" ADDITIONAL_KEYWORDS = [ "cdef", "ctypedef", "cpdef", "inline", "cimport", "extern", "include", "begin", "end", "by", "gil", "nogil", "const", "public", "readonly", "fused", "static", "api", "DEF", "IF", "ELIF", "ELSE"] ADDITIONAL_BUILTINS = C_TYPES.split() + [ "array", "bint", "Py_ssize_t", "intern", "reload", "sizeof", "NULL"] PROG = re.compile(make_python_patterns(ADDITIONAL_KEYWORDS, ADDITIONAL_BUILTINS), re.S) IDPROG = re.compile(r"\s+([\w\.]+)", re.S) #============================================================================== # Enaml syntax highlighter #==============================================================================
CythonSH
python
cherrypy__cherrypy
cherrypy/test/test_iterator.py
{ "start": 1326, "end": 1407 }
class ____(OurIterator): close = 'close' # not callable!
OurUnclosableIterator
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeNarrowingIsNone1.py
{ "start": 2040, "end": 2338 }
class ____: def __init__(self, parent: Self | None) -> None: self.parent = parent def get_depth(self) -> int: current: Self | None = self count = 0 while current is not None: count += 1 current = current.parent return count - 1
A
python
pennersr__django-allauth
allauth/socialaccount/providers/saml/views.py
{ "start": 2002, "end": 5000 }
class ____(SAMLViewMixin, View): def dispatch(self, request, organization_slug): provider = self.get_provider(organization_slug) acs_session = LoginSession(request, "saml_acs_session", "saml-acs-session") acs_request = None acs_request_data = acs_session.store.get("request") if acs_request_data: acs_request = httpkit.deserialize_request(acs_request_data, HttpRequest()) acs_session.delete() if not acs_request: logger.error("Unable to finish login, SAML ACS session missing") return render_authentication_error(request, provider) auth = build_auth(acs_request, provider) error_reason = None errors = [] try: # We're doing the check for a valid `InResponeTo` ourselves later on # (*) by checking if there is a matching state stashed. auth.process_response(request_id=None) except binascii.Error: errors = ["invalid_response"] error_reason = "Invalid response" except OneLogin_Saml2_Error as e: errors = ["error"] error_reason = str(e) if not errors: errors = auth.get_errors() if errors: # e.g. ['invalid_response'] error_reason = auth.get_last_error_reason() or error_reason logger.error( "Error processing SAML ACS response: %s: %s" % (", ".join(errors), error_reason) ) return render_authentication_error( request, provider, extra_context={ "saml_errors": errors, "saml_last_error_reason": error_reason, }, ) if not auth.is_authenticated(): return render_authentication_error( request, provider, error=AuthError.CANCELLED ) login = provider.sociallogin_from_response(request, auth) # (*) If we (the SP) initiated the login, there should be a matching # state. 
state_id = auth.get_last_response_in_response_to() if state_id: login.state = provider.unstash_redirect_state(request, state_id) else: # IdP initiated SSO reject = provider.app.settings.get("advanced", {}).get( "reject_idp_initiated_sso", True ) if reject: logger.error("IdP initiated SSO rejected") return render_authentication_error(request, provider) next_url = decode_relay_state(acs_request.POST.get("RelayState")) login.state["process"] = AuthProcess.LOGIN if next_url: login.state["next"] = next_url return complete_social_login(request, login) finish_acs = FinishACSView.as_view() @method_decorator(csrf_exempt, name="dispatch") @method_decorator(login_not_required, name="dispatch")
FinishACSView
python
davidhalter__jedi
jedi/inference/value/module.py
{ "start": 2011, "end": 4717 }
class ____(SubModuleDictMixin): _module_name_class = ModuleName def get_filters(self, origin_scope=None): yield MergedFilter( ParserTreeFilter( parent_context=self.as_context(), origin_scope=origin_scope ), GlobalNameFilter(self.as_context()), ) yield DictFilter(self.sub_modules_dict()) yield DictFilter(self._module_attributes_dict()) yield from self.iter_star_filters() def py__class__(self): c, = values_from_qualified_names(self.inference_state, 'types', 'ModuleType') return c def is_module(self): return True def is_stub(self): return False @property @inference_state_method_cache() def name(self): return self._module_name_class(self, self.string_names[-1]) @inference_state_method_cache() def _module_attributes_dict(self): names = ['__package__', '__doc__', '__name__'] # All the additional module attributes are strings. dct = dict((n, _ModuleAttributeName(self, n)) for n in names) path = self.py__file__() if path is not None: dct['__file__'] = _ModuleAttributeName(self, '__file__', str(path)) return dct def iter_star_filters(self): for star_module in self.star_imports(): f = next(star_module.get_filters(), None) assert f is not None yield f # I'm not sure if the star import cache is really that effective anymore # with all the other really fast import caches. Recheck. Also we would need # to push the star imports into InferenceState.module_cache, if we reenable this. @inference_state_method_cache([]) def star_imports(self): from jedi.inference.imports import Importer modules = [] module_context = self.as_context() for i in self.tree_node.iter_imports(): if i.is_star_import(): new = Importer( self.inference_state, import_path=i.get_paths()[-1], module_context=module_context, level=i.level ).follow() for module in new: if isinstance(module, ModuleValue): modules += module.star_imports() modules += new return modules def get_qualified_names(self): """ A module doesn't have a qualified name, but it's important to note that it's reachable and not `None`. 
With this information we can add qualified names on top for all value children. """ return ()
ModuleMixin
python
getsentry__sentry
tests/sentry/api/endpoints/test_organization_invite_request_index.py
{ "start": 627, "end": 2304 }
class ____(APITestCase): endpoint = "sentry-api-0-organization-invite-request-index" @cached_property def org(self) -> Organization: return self.create_organization(owner=self.user) def setUp(self) -> None: self.invite_request = self.create_member( email="test@example.com", organization=self.org, role="member", invite_status=InviteStatus.REQUESTED_TO_BE_INVITED.value, ) self.request_to_join = self.create_member( email="example@gmail.com", organization=self.org, role="member", invite_status=InviteStatus.REQUESTED_TO_JOIN.value, ) def test_simple(self) -> None: self.login_as(user=self.user) resp = self.get_response(self.org.slug) assert resp.status_code == 200 assert len(resp.data) == 2 assert resp.data[0]["email"] == self.invite_request.email assert resp.data[0]["inviteStatus"] == "requested_to_be_invited" assert resp.data[1]["email"] == self.request_to_join.email assert resp.data[1]["inviteStatus"] == "requested_to_join" def test_join_requests_disabled(self) -> None: OrganizationOption.objects.create( organization_id=self.org.id, key="sentry:join_requests", value=False ) self.login_as(user=self.user) resp = self.get_response(self.org.slug) assert resp.status_code == 200 assert len(resp.data) == 1 assert resp.data[0]["email"] == self.invite_request.email assert resp.data[0]["inviteStatus"] == "requested_to_be_invited"
OrganizationInviteRequestListTest
python
joke2k__faker
tests/providers/test_person.py
{ "start": 54023, "end": 54628 }
class ____(unittest.TestCase): """Tests person in the pt_PT locale.""" def setUp(self): self.fake = Faker("pt_PT") Faker.seed(0) def test_male_first_name(self): first_name_male = self.fake.first_name_male() assert first_name_male in PtPtProvider.first_names_male def test_female_first_name(self): first_name_female = self.fake.first_name_female() assert first_name_female in PtPtProvider.first_names_female def test_last_name(self): last_name = self.fake.last_name() assert last_name in PtPtProvider.last_names
TestPtPt
python
scrapy__scrapy
tests/test_contracts.py
{ "start": 802, "end": 984 }
class ____(Contract): name = "custom_success_contract" def adjust_request_args(self, args): args["url"] = "http://scrapy.org" return args
CustomSuccessContract
python
Pylons__pyramid
src/pyramid/interfaces.py
{ "start": 16734, "end": 17324 }
class ____(Interface): def __call__(self, **kw): """ Return an object which implements :class:`pyramid.interfaces.IViewMapper`. ``kw`` will be a dictionary containing view-specific arguments, such as ``permission``, ``predicates``, ``attr``, ``renderer``, and other items. An IViewMapperFactory is used by :meth:`pyramid.config.Configurator.add_view` to provide a plugpoint to extension developers who want to modify potential view callable invocation signatures and response values. """
IViewMapperFactory
python
realpython__materials
django-view-auth/Blog/core/migrations/0001_initial.py
{ "start": 92, "end": 706 }
class ____(migrations.Migration): initial = True dependencies = [] operations = [ migrations.CreateModel( name="Blog", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("title", models.CharField(max_length=50)), ("content", models.TextField()), ], ), ]
Migration
python
getsentry__sentry
src/sentry/sentry_apps/services/app/model.py
{ "start": 5739, "end": 5978 }
class ____(TypedDict, total=False): installation_ids: list[int] app_ids: list[int] organization_id: int uuids: list[str] status: int api_token_id: int api_installation_token_id: int
SentryAppInstallationFilterArgs
python
run-llama__llama_index
llama-index-integrations/llms/llama-index-llms-oci-data-science/llama_index/llms/oci_data_science/client.py
{ "start": 20230, "end": 28833 }
class ____(BaseClient): """ Asynchronous HTTP client for invoking models with support for request and streaming APIs, including retry logic. """ def __init__(self, *args, **kwargs) -> None: """ Initialize the AsyncClient. Args: *args: Positional arguments forwarded to BaseClient. **kwargs: Keyword arguments forwarded to BaseClient. """ super().__init__(*args, **kwargs) self._client = httpx.AsyncClient(timeout=self.timeout) def is_closed(self) -> bool: return self._client.is_closed async def close(self) -> None: """ Close the underlying HTTPX client. The client will *not* be usable after this. """ await self._client.aclose() async def __aenter__(self: _T) -> _T: # noqa: PYI019 return self async def __aexit__( self, exc_type: Optional[type[BaseException]] = None, exc: Optional[BaseException] = None, exc_tb: Optional[TracebackType] = None, ) -> None: await self.close() def __del__(self) -> None: try: if not self._client.is_closed: loop = asyncio.get_event_loop() if loop.is_running(): loop.create_task(self.close()) else: loop.run_until_complete(self.close()) except Exception: pass @_retry_decorator async def _request( self, payload: Dict[str, Any], headers: Optional[Dict[str, str]] = None ) -> Dict[str, Any]: """ Send a POST request to the configured endpoint with retry and error handling. Args: payload (Dict[str, Any]): Parameters for the request payload. headers (Optional[Dict[str, str]]): HTTP headers to include in the request. Returns: Dict[str, Any]: Decoded JSON response. Raises: ExtendedRequestException: Raised when the request fails. 
""" logger.debug(f"Starting asynchronous request with payload: {payload}") try: response = await self._client.post( self.endpoint, headers=self._prepare_headers(stream=False, headers=headers), auth=self.auth, json=payload, ) logger.debug(f"Received response with status code: {response.status_code}") response.raise_for_status() json_response = response.json() logger.debug(f"Response JSON: {json_response}") return json_response except Exception as e: last_exception_text = ( e.response.text if hasattr(e, "response") and e.response else str(e) ) logger.error( f"Request failed. Error: {e!s}. Details: {last_exception_text}" ) raise ExtendedRequestException( f"Request failed: {e!s}. Details: {last_exception_text}", e, last_exception_text, ) from e async def _stream( self, payload: Dict[str, Any], headers: Optional[Dict[str, str]] = None ) -> AsyncIterator[Mapping[str, Any]]: """ Send a POST request expecting a streaming response with retry logic. Args: payload (Dict[str, Any]): Parameters for the request payload. headers (Optional[Dict[str, str]]): HTTP headers to include in the request. Yields: Mapping[str, Any]: Decoded JSON response line-by-line. Raises: ExtendedRequestException: Raised when the request fails. 
""" logger.debug(f"Starting asynchronous streaming request with payload: {payload}") last_exception_text = None for attempt in range(1, self.retries + 2): # retries + initial attempt logger.debug(f"Attempt {attempt} for asynchronous streaming request.") try: async with self._client.stream( "POST", self.endpoint, headers=self._prepare_headers(stream=True, headers=headers), auth=self.auth, json={**payload, "stream": True}, ) as response: try: logger.debug( f"Received streaming response with status code: {response.status_code}" ) response.raise_for_status() async for line in response.aiter_lines(): if not line: # Skip empty lines continue parsed_line = self._parse_streaming_line(line) if parsed_line: logger.debug(f"Yielding parsed line: {parsed_line}") yield parsed_line return except Exception as e: if hasattr(e, "response") and e.response: content = await e.response.aread() last_exception_text = content.decode( e.response.encoding or DEFAULT_ENCODING ) raise except Exception as e: if attempt <= self.retries and _should_retry_exception(e): delay = self.backoff_factor * (2 ** (attempt - 1)) logger.warning( f"Streaming attempt {attempt} failed: {e}. Retrying in {delay} seconds..." ) await asyncio.sleep(delay) else: logger.error( f"Streaming request failed. Error: {e!s}. Details: {last_exception_text}" ) raise ExtendedRequestException( f"Streaming request failed: {e!s}. Details: {last_exception_text}", e, last_exception_text, ) from e async def generate( self, prompt: str, payload: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, stream: bool = False, ) -> Union[Dict[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Generate text completion for the given prompt. Args: prompt (str): Input text prompt for the model. payload (Optional[Dict[str, Any]]): Additional parameters for the request payload. headers (Optional[Dict[str, str]]): HTTP headers to include in the request. stream (bool): Whether to use streaming for the response. 
Returns: Union[Dict[str, Any], AsyncIterator[Mapping[str, Any]]]: A full JSON response or an async iterator for streaming responses. """ logger.debug(f"Generating text with prompt: {prompt}, stream: {stream}") payload = {**(payload or {}), "prompt": prompt} headers = {"route": "/v1/completions", **(headers or {})} if stream: return self._stream(payload=payload, headers=headers) return await self._request(payload=payload, headers=headers) async def chat( self, messages: List[Dict[str, Any]], payload: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, stream: bool = False, ) -> Union[Dict[str, Any], AsyncIterator[Mapping[str, Any]]]: """ Perform a chat interaction with the model. Args: messages (List[Dict[str, Any]]): List of message dictionaries for chat interaction. payload (Optional[Dict[str, Any]]): Additional parameters for the request payload. headers (Optional[Dict[str, str]]): HTTP headers to include in the request. stream (bool): Whether to use streaming for the response. Returns: Union[Dict[str, Any], AsyncIterator[Mapping[str, Any]]]: A full JSON response or an async iterator for streaming responses. """ logger.debug(f"Starting chat with messages: {messages}, stream: {stream}") payload = {**(payload or {}), "messages": messages} headers = {"route": "/v1/chat/completions", **(headers or {})} if stream: return self._stream(payload=payload, headers=headers) return await self._request(payload=payload, headers=headers)
AsyncClient
python
walkccc__LeetCode
solutions/1146. Snapshot Array/1146.py
{ "start": 0, "end": 550 }
class ____: def __init__(self, length: int): self.snaps = [[[0, 0]] for _ in range(length)] self.snap_id = 0 def set(self, index: int, val: int) -> None: snap = self.snaps[index][-1] if snap[0] == self.snap_id: snap[1] = val else: self.snaps[index].append([self.snap_id, val]) def snap(self) -> int: self.snap_id += 1 return self.snap_id - 1 def get(self, index: int, snap_id: int) -> int: i = bisect.bisect_left(self.snaps[index], [snap_id + 1]) - 1 return self.snaps[index][i][1]
SnapshotArray
python
kamyu104__LeetCode-Solutions
Python/closest-fair-integer.py
{ "start": 69, "end": 1092 }
class ____(object): def closestFair(self, n): """ :type n: int :rtype: int """ digits = map(int, str(n)) result = [] if len(digits)%2 == 0: left = [0]*2 for d in digits: left[d%2] += 1 if left[0] == len(digits)//2: return n for i in reversed(xrange(len(digits)//2, len(digits))): left[digits[i]%2] -= 1 right = [len(digits)//2-left[0], len(digits)//2-left[1]] if any(x < 0 for x in right): continue d = digits[i]+1 if right[(digits[i]+1)%2]-1 >= 0 else digits[i]+2 if d > 9: continue right[d%2] -= 1 result = digits[:i]+[d]+[0]*right[0]+[1]*right[1] break if not result: l = len(digits)//2+1 result = [1]+[0]*l+[1]*(l-1) return int("".join(map(str, result)))
Solution
python
PyCQA__pylint
doc/data/messages/a/abstract-class-instantiated/bad.py
{ "start": 13, "end": 151 }
class ____(abc.ABC): @abc.abstractmethod def make_sound(self): pass sheep = Animal() # [abstract-class-instantiated]
Animal
python
modin-project__modin
modin/core/storage_formats/pandas/parsers.py
{ "start": 23991, "end": 24468 }
class ____(NamedTuple): """ Class to store path and row group information for parquet reads. Parameters ---------- path : str, path object or file-like object Name of the file to read. row_group_start : int Row group to start read from. row_group_end : int Row group to stop read. """ path: Any row_group_start: int row_group_end: int @doc(_doc_pandas_parser_class, data_type="PARQUET data")
ParquetFileToRead
python
jina-ai__jina
jina/enums.py
{ "start": 5246, "end": 5381 }
class ____(BetterEnum): """Potential forced network modes""" AUTO = 0 HOST = 1 BRIDGE = 2 NONE = 3
DockerNetworkMode
python
spack__spack
lib/spack/spack/solver/asp.py
{ "start": 4450, "end": 4644 }
class ____: """Enum for the optimization KIND of a criteria. It's not using enum.Enum since it must be serializable. """ BUILD = 0 CONCRETE = 1 OTHER = 2
OptimizationKind
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/generator13.py
{ "start": 1611, "end": 1772 }
class ____(Protocol): async def iter(self) -> AsyncGenerator[bytes, None]: ... async def func7(p: Proto): async for x in await p.iter(): pass
Proto
python
doocs__leetcode
solution/0300-0399/0300.Longest Increasing Subsequence/Solution2.py
{ "start": 0, "end": 388 }
class ____: def __init__(self, n: int): self.n = n self.c = [0] * (n + 1) def update(self, x: int, v: int): while x <= self.n: self.c[x] = max(self.c[x], v) x += x & -x def query(self, x: int) -> int: mx = 0 while x: mx = max(mx, self.c[x]) x -= x & -x return mx
BinaryIndexedTree
python
wandb__wandb
wandb/sdk/internal/internal_api.py
{ "start": 4427, "end": 4567 }
class ____(threading.local): context: context.Context | None def __init__(self) -> None: self.context = None
_ThreadLocalData
python
huggingface__transformers
tests/models/musicgen_melody/test_feature_extraction_musicgen_melody.py
{ "start": 3931, "end": 10096 }
class ____(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = MusicgenMelodyFeatureExtractor if is_torchaudio_available() else None def setUp(self): self.feat_extract_tester = MusicgenMelodyFeatureExtractionTester(self) # Copied from tests.models.seamless_m4t.test_feature_extraction_seamless_m4t.SeamlessM4TFeatureExtractionTest.test_feat_extract_from_and_save_pretrained def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() self.assertDictEqual(dict_first, dict_second) # Copied from tests.models.seamless_m4t.test_feature_extraction_seamless_m4t.SeamlessM4TFeatureExtractionTest.test_feat_extract_to_json_file def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() self.assertEqual(dict_first, dict_second) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test feature size input_features = 
feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features self.assertTrue(input_features.ndim == 3) self.assertTrue(input_features.shape[0] == 3) # Ignore copy self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size) # Test not batched input encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test 2-D numpy arrays are batched. speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) @require_torchaudio def test_call_from_demucs(self): # Tests that all call wrap to encode_plus and batch_encode_plus feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # (batch_size, num_stems, channel_size, audio_length) inputs = torch.rand([4, 5, 2, 44000]) # Test feature size input_features = feature_extractor(inputs, padding=True, return_tensors="np").input_features self.assertTrue(input_features.ndim == 3) self.assertTrue(input_features.shape[0] == 4) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size) # Test single input 
encoded_sequences_1 = feature_extractor(inputs[[0]], return_tensors="np").input_features self.assertTrue(np.allclose(encoded_sequences_1[0], input_features[0], atol=1e-3)) # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_double_precision_pad with input_features->input_features def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32) def test_integration(self): EXPECTED_INPUT_FEATURES = torch.zeros([2, 8, 12]) EXPECTED_INPUT_FEATURES[0, :6, 9] = 1 EXPECTED_INPUT_FEATURES[0, 6:, 0] = 1 EXPECTED_INPUT_FEATURES[1, :, 9] = 1 input_speech = [get_bip_bip(duration=0.5), get_bip_bip(duration=1.0)] feature_extractor = MusicgenMelodyFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="pt").input_features self.assertEqual(input_features.shape, (2, 8, 12)) self.assertTrue((input_features == EXPECTED_INPUT_FEATURES).all())
MusicgenMelodyFeatureExtractionTest
python
ansible__ansible
lib/ansible/modules/apt_repository.py
{ "start": 7431, "end": 7573 }
class ____(Exception): pass # Simple version of aptsources.sourceslist.SourcesList. # No advanced logic and no backups inside.
InvalidSource
python
wandb__wandb
wandb/sdk/artifacts/_generated/artifact_used_by.py
{ "start": 545, "end": 790 }
class ____(GQLResult): node: RunInfoFragment ArtifactUsedBy.model_rebuild() ArtifactUsedByArtifact.model_rebuild() ArtifactUsedByArtifactUsedBy.model_rebuild() ArtifactUsedByArtifactUsedByEdges.model_rebuild()
ArtifactUsedByArtifactUsedByEdges
python
apache__airflow
providers/common/sql/tests/unit/common/sql/operators/test_generic_transfer.py
{ "start": 2381, "end": 5270 }
class ____: def teardown_method(self): from airflow.providers.mysql.hooks.mysql import MySqlHook drop_tables = {"test_mysql_to_mysql", "test_airflow"} with closing(MySqlHook().get_conn()) as conn: for table in drop_tables: # Previous version tried to run execute directly on dbapi call, which was accidentally working with closing(conn.cursor()) as cur: cur.execute(f"DROP TABLE IF EXISTS {table}") @pytest.mark.parametrize( "client", [ "mysqlclient", "mysql-connector-python", ], ) def test_mysql_to_mysql(self, client, dag_maker): class MySqlContext: def __init__(self, client): self.client = client self.connection = MySqlHook.get_connection(MySqlHook.default_conn_name) self.init_client = self.connection.extra_dejson.get("client", "mysqlclient") def __enter__(self): self.connection.set_extra(f'{{"client": "{self.client}"}}') def __exit__(self, exc_type, exc_val, exc_tb): self.connection.set_extra(f'{{"client": "{self.init_client}"}}') with MySqlContext(client): sql = "SELECT * FROM connection;" with dag_maker(f"TEST_DAG_ID_{client}", start_date=DEFAULT_DATE): op = GenericTransfer( task_id="test_m2m", preoperator=[ "DROP TABLE IF EXISTS test_mysql_to_mysql", "CREATE TABLE IF NOT EXISTS test_mysql_to_mysql LIKE connection", ], source_conn_id="airflow_db", destination_conn_id="airflow_db", destination_table="test_mysql_to_mysql", sql=sql, ) dag_maker.run_ti(op.task_id) @mock.patch("airflow.providers.common.sql.hooks.sql.DbApiHook.insert_rows") def test_mysql_to_mysql_replace(self, mock_insert, dag_maker): sql = "SELECT * FROM connection LIMIT 10;" with dag_maker("TEST_DAG_ID", start_date=DEFAULT_DATE): op = GenericTransfer( task_id="test_m2m", preoperator=[ "DROP TABLE IF EXISTS test_mysql_to_mysql", "CREATE TABLE IF NOT EXISTS test_mysql_to_mysql LIKE connection", ], source_conn_id="airflow_db", destination_conn_id="airflow_db", destination_table="test_mysql_to_mysql", sql=sql, insert_args={"replace": True}, ) dag_maker.run_ti(op.task_id) assert mock_insert.called _, 
kwargs = mock_insert.call_args assert "replace" in kwargs @pytest.mark.backend("postgres")
TestMySql
python
haoel__leetcode
algorithms/python/SwimInRisingWater/swim_in_rising_water.py
{ "start": 490, "end": 1552 }
class ____: def swimInWater(self, grid: List[List[int]]) -> int: src, dst = grid[0][0], grid[len(grid)-1][len(grid)-1] visited = set() heap_queue = [(src, 0, 0)] # src, row, col output = 0 directions = [(0, 1), (1, 0), (-1, 0), (0, -1)] while heap_queue: current, row, col = heappop(heap_queue) output = max(current, output) # if we already hit the destination, break out of the loop if current == dst: break for x, y in directions: dx, dy = row+x, col+y if self.check_bounds(dx, dy, grid) and (dx, dy) not in visited: heappush(heap_queue, (grid[dx][dy], dx, dy)) visited.add((dx, dy)) return output def check_bounds(self, r, c, grid) -> bool: if 0 <= r < len(grid[0]) and 0 <= c < len(grid): return True return False if __name__ == "__main__": grid = [[0, 2], [1, 3]] print(Solution().swimInWater(grid))
Solution
python
huggingface__transformers
src/transformers/models/sew/modular_sew.py
{ "start": 9436, "end": 12638 }
class ____(PreTrainedModel): config: SEWConfig base_model_prefix = "sew" main_input_name = "input_values" input_modalities = "audio" supports_gradient_checkpointing = True _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = False # needs a proper look into the mask creation @torch.no_grad() def _init_weights(self, module): """Initialize the weights""" if isinstance(module, SEWPositionalConvEmbedding): init.normal_( module.conv.weight, mean=0, std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)), ) init.constant_(module.conv.bias, 0) elif isinstance(module, nn.Linear): init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): init.zeros_(module.bias) init.ones_(module.weight) elif isinstance(module, nn.Conv1d): if is_deepspeed_zero3_enabled(): import deepspeed if hasattr(module, "weight_v") and hasattr(module, "weight_g"): with deepspeed.zero.GatheredParameters([module.weight_v, module.weight_g], modifier_rank=0): init.kaiming_normal_(module.weight) else: with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0): init.kaiming_normal_(module.weight) else: init.kaiming_normal_(module.weight) if isinstance(module, (nn.Linear, nn.Conv1d)) and module.bias is not None: init.zeros_(module.bias) def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the convolutional layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) return input_lengths def _get_feature_vector_attention_mask(self, feature_vector_length: int, 
attention_mask: torch.LongTensor): output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) batch_size = attention_mask.shape[0] attention_mask = torch.zeros( (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device ) # these two operations makes sure that all values before the output lengths idxs are attended to attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() return attention_mask @auto_docstring
SEWPreTrainedModel